/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <algorithm>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "boost/optional.hpp"
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace platform {

using framework::DataLayout;
using framework::Tensor;
using user_function = std::function<std::shared_ptr<float>(const float*)>;
using memory = mkldnn::memory;

template <typename T, typename TForward,
          typename TBackward = mkldnn_dummy_primitive>
class MKLDNNHandlerT {
 public:
  MKLDNNHandlerT(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                 platform::Place cpu_place, const std::string& base_key)
      : dev_ctx_(dev_ctx),
        engine_(engine),
        place_(cpu_place),
        key_common_(base_key),
        key_(platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, base_key)),
        fwd_pd_(nullptr),
        bwd_pd_(nullptr) {
    platform::MKLDNNDeviceContext::tls().log_lib_version();
  }

  std::shared_ptr<TForward> AcquireForwardPrimitive() {
    const std::string key_p = key_ + "@fwd_p";
    auto forward_p =
        std::static_pointer_cast<TForward>(dev_ctx_.GetBlob(key_p));
    if (forward_p == nullptr) {
      forward_p = std::make_shared<TForward>(*fwd_pd_);
      dev_ctx_.SetBlob(key_p, forward_p);
    }
    return forward_p;
  }

  std::shared_ptr<TBackward> AcquireBackwardPrimitive() {
    const std::string key_p = key_ + "@bwd_p";
    auto backward_p =
        std::static_pointer_cast<TBackward>(dev_ctx_.GetBlob(key_p));
    if (backward_p == nullptr) {
      backward_p = std::make_shared<TBackward>(*bwd_pd_);
      dev_ctx_.SetBlob(key_p, backward_p);
    }
    return backward_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        fwd_pd_->src_desc(), to_void_cast<T>(input_data), "@src_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output) {
    T_out* ptr =
        output->mutable_data<T_out>(place_, fwd_pd_->dst_desc().get_size());
    return this->AcquireMemoryFromPrimitive(fwd_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<mkldnn::memory> AcquireDstMemory(void) {
    return this->AcquireMemoryFromPrimitive(fwd_pd_->dst_desc(), "@dstt_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      const framework::Tensor* output) {
    const T_out* output_data = output->data<T_out>();
    return this->AcquireMemoryFromPrimitive(bwd_pd_->dst_desc(),
                                            to_void_cast<T_out>(output_data),
                                            "@bwd-dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemory(
      const framework::Tensor* diffdst) {
    const T* ptr = diffdst->data<T>();
    return this->AcquireMemoryFromPrimitive(
        bwd_pd_->diff_dst_desc(), to_void_cast<T>(ptr), "@diff_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemory(
      framework::Tensor* diffsrc) {
    T* ptr =
        diffsrc->mutable_data<T>(place_, bwd_pd_->diff_src_desc().get_size());
    return this->AcquireMemoryFromPrimitive(bwd_pd_->diff_src_desc(), ptr,
                                            "@diff_src_mem_p");
  }

 protected:
  bool isCached() {
    const std::string key_pd = key_common_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));

    const std::string key_p = key_ + "@fwd_p";
    return (dev_ctx_.GetBlob(key_p) != nullptr);
  }

  bool isBwdCached() {
    const std::string key_pd = key_common_ + "@bwd_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));

    const std::string key_p = key_ + "@bwd_p";
    return (dev_ctx_.GetBlob(key_p) != nullptr);
  }

  // If your primitive descriptor requires attributes, pass them as the
  // first argument and the parameters to the descriptor constructor in the
  // following arguments. Otherwise, all arguments will be forwarded to the
  // descriptor constructor, including the first one.
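  // (Illustrative, assumed call sites:
  //    this->AcquireForwardPrimitiveDescriptor(attrs, algo, src_md, dst_md);
  //  passes `attrs` to the primitive descriptor and forwards the rest to
  //  TForward::desc, while
  //    this->AcquireForwardPrimitiveDescriptor(algo, src_md, dst_md);
  //  forwards all arguments to TForward::desc.)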
  template <typename Arg, typename... Args>
  void AcquireForwardPrimitiveDescriptor(Arg&& first_arg, Args&&... args) {
    // Forward PD has to be passed to the Grad op that
    // may be executed by a different thread, hence
    // for that one we use a key that does not contain the TID
    const std::string key_pd = key_common_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (fwd_pd_ == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);
      fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
          dev_ctx_.GetBlob(key_pd));
      if (fwd_pd_ == nullptr) {
        CreateForwardPrimitiveDescriptor(first_arg,
                                         std::forward<Args>(args)...);
        dev_ctx_.SetBlob(key_pd, fwd_pd_);
      }
    }
  }

  // Using sfinae to specialise variadic function. Workaround for not having
  // if constexpr in C++ 11.
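  // (For illustration only: with C++17 this dispatch could be a single
  //  function using
  //    if constexpr (std::is_same<typename std::decay<First>::type,
  //                               dnnl::primitive_attr>::value) { ... }
  //  the two SFINAE overloads below keep the header C++11-compatible.)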
  template <class First, class... Args>
  typename std::enable_if<std::is_same<typename std::decay<First>::type,
                                       dnnl::primitive_attr>::value>::type
  CreateForwardPrimitiveDescriptor(First&& first, Args&&... args) {
    auto fwd_desc = typename TForward::desc(std::forward<Args>(args)...);
    fwd_pd_ = std::make_shared<typename TForward::primitive_desc>(
        fwd_desc, first, engine_);
  }

  template <class First, class... Args>
  typename std::enable_if<!std::is_same<typename std::decay<First>::type,
                                        dnnl::primitive_attr>::value>::type
  CreateForwardPrimitiveDescriptor(First&& first, Args&&... args) {
    auto fwd_desc = typename TForward::desc(std::forward<First>(first),
                                            std::forward<Args>(args)...);
    fwd_pd_ =
        std::make_shared<typename TForward::primitive_desc>(fwd_desc, engine_);
  }

  template <typename... Args>
  void AcquireBackwardPrimitiveDescriptor(Args&&... args) {
    const std::string key_fwd_pd = key_common_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_fwd_pd));
    PADDLE_ENFORCE_NOT_NULL(
        fwd_pd_, platform::errors::Unavailable(
                     "Get MKLDNN Forward primitive %s failed.", key_fwd_pd));
    const std::string key_pd = key_ + "@bwd_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (bwd_pd_ == nullptr) {
      auto bwd_desc = typename TBackward::desc(std::forward<Args>(args)...);
      bwd_pd_ = std::make_shared<typename TBackward::primitive_desc>(
          bwd_desc, engine_, *fwd_pd_);
      dev_ctx_.SetBlob(key_pd, bwd_pd_);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      const std::string& suffix) {
    return std::static_pointer_cast<mkldnn::memory>(
        dev_ctx_.GetBlob(key_ + suffix));
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, void* ptr, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_);
      dev_ctx_.SetBlob(local_key, mem_p);
    }
    return mem_p;
  }

  void AcquireReorder(const std::shared_ptr<mkldnn::memory>& user_memory_p,
                      const std::shared_ptr<mkldnn::memory>& target_memory_p,
                      const std::string& suffix) {
    const auto key_reorder_p = key_ + suffix + "reorder_p";

    auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
        dev_ctx_.GetBlob(key_reorder_p));

    if (reorder_p == nullptr) {
      reorder_p =
          std::make_shared<mkldnn::reorder>(*user_memory_p, *target_memory_p);
      dev_ctx_.SetBlob(key_reorder_p, reorder_p);
    }

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    platform::RecordEvent record_reorder("int_reorder",
                                         platform::EventRole::kUniqueOp);
    reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                 {MKLDNN_ARG_TO, *target_memory_p}});
    astream.wait();
  }

  template <typename F = T>
  std::shared_ptr<mkldnn::memory> AcquireMemoryWithReorder(
      const mkldnn::memory::desc& user_md,
      const mkldnn::memory::desc& target_md, void* ptr,
      const std::string& suffix, bool is_persistent = false,
      std::function<std::shared_ptr<F>(const F*)> custom_reorder_func = {}) {
    const auto target_key = key_ + suffix + "_target";
    const auto key_reorder_p = key_ + suffix + "reorder_p";
    const auto user_key = key_ + suffix + "_user";

    auto target_memory_p =
        std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(target_key));

    if (target_memory_p == nullptr) {
      if (custom_reorder_func) {
        auto reordered_data =
            custom_reorder_func(reinterpret_cast<const F*>(ptr));
        dev_ctx_.SetBlob(key_reorder_p + "-custom_reorder", reordered_data);
        ptr = reinterpret_cast<void*>(reordered_data.get());
      }
      auto user_memory_p =
          std::make_shared<dnnl::memory>(user_md, engine_, ptr);
      if (user_md != target_md) {
        target_memory_p = std::make_shared<mkldnn::memory>(target_md, engine_);
        auto reorder_p =
            std::make_shared<dnnl::reorder>(*user_memory_p, *target_memory_p);
        dev_ctx_.SetBlob(key_reorder_p, reorder_p);

        auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      } else {
        target_memory_p = user_memory_p;
      }
      dev_ctx_.SetBlob(user_key, user_memory_p);
      dev_ctx_.SetBlob(target_key, target_memory_p);
    } else if (!is_persistent) {
      auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

      auto user_memory_p =
          std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(user_key));
      user_memory_p->set_data_handle(ptr);

      auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
          dev_ctx_.GetBlob(key_reorder_p));
      if (reorder_p != nullptr) {
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
    }
    return target_memory_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(const std::string& suffix) {
    const auto local_key = key_ + suffix;
    return std::static_pointer_cast<mkldnn::memory>(
        dev_ctx_.GetBlob(local_key));
  }

  const MKLDNNDeviceContext& dev_ctx_;
  mkldnn::engine engine_;
  platform::Place place_;
  std::string key_common_;
  std::string key_;
  std::shared_ptr<typename TForward::primitive_desc> fwd_pd_;
  std::shared_ptr<typename TBackward::primitive_desc> bwd_pd_;
};

// TODO(grygielski) this class will be deleted later.
class MKLDNNHandler {
 public:
  MKLDNNHandler(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
                const std::string& base_key)
      : dev_ctx_(dev_ctx),
        engine_(engine),
        key_common_(base_key),
        key_(platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, base_key)) {
    platform::MKLDNNDeviceContext::tls().log_lib_version();
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_diff_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_diff_dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, void* ptr, const std::string& suffix) {
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemoryFromPrimitive(
      mkldnn::memory::desc md, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<mkldnn::memory>(md, engine_);
      dev_ctx_.SetBlob(local_key, mem_p);
    }
    return mem_p;
  }

  // This incarnation of AcquireMemory can call a user function, e.g. a custom
  // reorder or preprocessing routine, if needed
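  // (Illustrative, hypothetical caller: a custom_func may return a
  //  preprocessed copy of the data, e.g.
  //    user_function reorder_fn = [n](const float* src) {
  //      std::shared_ptr<float> buf(new float[n],
  //                                 std::default_delete<float[]>());
  //      // ... fill buf with a reordered copy of src ...
  //      return buf;
  //    };
  //  The returned buffer is cached in the device context so the memory
  //  object created below keeps pointing at valid data.)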
  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const mkldnn::memory::desc& md, void* ptr, const std::string& suffix,
      user_function custom_func = {}) {
    /*Generate key*/
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      // Call custom reorder/preprocessing func if available
      if (custom_func) {
        auto reordered_data = custom_func(reinterpret_cast<const float*>(ptr));
        dev_ctx_.SetBlob(local_key + "-custom_reorder", reordered_data);
        ptr = reinterpret_cast<void*>(reordered_data.get());
      }

      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const std::vector<int64_t>& dims, const mkldnn::memory::data_type dtype,
      const MKLDNNMemoryFormat& fmt, void* ptr, const std::string& suffix) {
    /*Generate key*/
    auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto md = mkldnn::memory::desc(dims, dtype, fmt);

      mem_p = std::make_shared<mkldnn::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      const std::shared_ptr<mkldnn::memory>& user_memory_p,
      const std::shared_ptr<mkldnn::memory>& target_memory_p,
      const std::string& suffix,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto local_key = key_ + suffix;
    auto key_reorder_p = key_ + suffix + "reorder_p";

    auto stored_reorder_p = std::static_pointer_cast<mkldnn::reorder>(
        dev_ctx_.GetBlob(key_reorder_p));

    if (stored_reorder_p) {
      pipeline.push_back(*stored_reorder_p);
    } else {
      auto reorder_p =
          std::make_shared<mkldnn::reorder>(*user_memory_p, *target_memory_p);
      dev_ctx_.SetBlob(key_reorder_p, reorder_p);
      auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
      platform::RecordEvent record_reorder("int_reorder",
                                           platform::EventRole::kUniqueOp);
      reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                   {MKLDNN_ARG_TO, *target_memory_p}});
      astream.wait();
    }

    return target_memory_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireMemory(
      mkldnn::memory::desc& md,       // NOLINT
      mkldnn::memory::desc& user_md,  // NOLINT
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      const std::string& suffix,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f}, int mask = 0) {
    // create reorder primitive if the input format is not the preferred one
    auto local_key = key_ + suffix;
    auto key_reorder_p = key_ + suffix + "reorder_p";

    auto target_memory_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    if (target_memory_p == nullptr) {
      target_memory_p = user_memory_p;
      if (md != user_md) {
        target_memory_p = std::make_shared<mkldnn::memory>(md, engine_);
        std::shared_ptr<mkldnn::reorder::primitive_desc> reorder_pd;
        if (is_INT8) {
          mkldnn::primitive_attr
              attri;  // attribute for int8 weights and bias data reorder.
          attri.set_output_scales(mask, scale_data);

          reorder_pd = std::shared_ptr<mkldnn::reorder::primitive_desc>(
              new mkldnn::reorder::primitive_desc(*user_memory_p,
                                                  *target_memory_p, attri));
        } else {
          reorder_pd = std::shared_ptr<mkldnn::reorder::primitive_desc>(
              new mkldnn::reorder::primitive_desc(*user_memory_p,
                                                  *target_memory_p));
        }
        auto reorder_p =
            std::shared_ptr<mkldnn::reorder>(new mkldnn::reorder(*reorder_pd));
        dev_ctx_.SetBlob(key_reorder_p, reorder_p);

        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
      dev_ctx_.SetBlob(local_key, target_memory_p);
    } else if (!is_persistent) {
      // Make reorder if needed
      auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
          dev_ctx_.GetBlob(key_reorder_p));
      if (reorder_p != nullptr) {
        platform::RecordEvent record_reorder("int_reorder",
                                             platform::EventRole::kUniqueOp);
        reorder_p->execute(astream, {{MKLDNN_ARG_FROM, *user_memory_p},
                                     {MKLDNN_ARG_TO, *target_memory_p}});
        astream.wait();
      }
    }
    return target_memory_p;
  }

 protected:
  const MKLDNNDeviceContext& dev_ctx_;
  mkldnn::engine engine_;
  std::string key_common_;
  std::string key_;
};

template <typename T>
class BinaryMKLDNNHandler : public platform::MKLDNNHandlerT<T, dnnl::binary> {
 public:
  BinaryMKLDNNHandler(const dnnl::algorithm algo, const int axis,
                      const MKLDNNDeviceContext& dev_ctx,
                      const mkldnn::engine engine, platform::Place cpu_place,
                      const Tensor* x, const Tensor* y, Tensor* z,
                      float scale_x, float scale_y, float scale_z,
                      const std::string& uniq_name)
      : platform::MKLDNNHandlerT<T, dnnl::binary>(
            dev_ctx, engine, cpu_place,
            platform::CreateKey(
                dev_ctx, framework::vectorize(x->dims()), uniq_name,
                (algo == dnnl::algorithm::binary_mul ? "M" : ""))) {
    // Broadcasting combined with in-place may require a longer cache key,
    // so extend it with the rank difference between the inputs
    auto rankdiff = x->dims().size() - y->dims().size();
    if (rankdiff > 0) {
      auto suffix = std::to_string(rankdiff);
      this->key_ += suffix;
      this->key_common_ += suffix;
    }

    if (!this->isCached()) {
      PADDLE_ENFORCE_EQ(
          x->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for X tensor."));
      PADDLE_ENFORCE_NE(
          x->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for X tensor."));

      PADDLE_ENFORCE_EQ(
          y->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for Y tensor."));
      PADDLE_ENFORCE_NE(
          y->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for Y tensor."));

      const auto src_x_tz = framework::vectorize(x->dims());
      const auto src_y_tz = framework::vectorize(y->dims());
      // if output tensor(z) is nullptr then we are computing into oneDNN
      // managed buffer
      const auto dst_tz =
          (z == nullptr) ? src_x_tz : framework::vectorize(z->dims());

      const auto src0_md = dnnl::memory::desc(
          src_x_tz, platform::MKLDNNGetDataType<T>(), x->format());
      auto src1_md = dnnl::memory::desc(
          src_y_tz, platform::MKLDNNGetDataType<T>(), y->format());
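      // Example: for x dims [2, 3, 4, 5], y dims [3, 4] and axis == 1,
      // rankdiff == 2 and src1_md below is reshaped to [1, 3, 4, 1] so that
      // oneDNN can broadcast y over x.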
      if (rankdiff > 0) {
        std::vector<int64_t> dims1_ex(rankdiff, 1);
        dims1_ex.insert(next(dims1_ex.begin(), (axis == -1 ? rankdiff : axis)),
                        src_y_tz.begin(), src_y_tz.end());
        src1_md = src1_md.reshape(dims1_ex);
      }
      const auto dst_md = memory::desc(dst_tz, platform::MKLDNNGetDataType<T>(),
                                       MKLDNNMemoryFormat::any);

      auto attributes = CreateAttributes(algo, scale_x, scale_y, scale_z);
      this->AcquireForwardPrimitiveDescriptor(attributes, algo, src0_md,
                                              src1_md, dst_md);
    }
  }

  std::shared_ptr<mkldnn::memory> AcquireSecondSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        this->fwd_pd_->src1_desc(), to_void_cast<T>(input_data), "@src1_mem_p");
  }

 private:
  static inline dnnl::primitive_attr CreateAttributes(dnnl::algorithm op,
                                                      float scale_x,
                                                      float scale_y,
                                                      float scale_z) {
    // Scales set in attributes for inputs contribute to the output equation
    // in the following way (assuming no broadcasting takes place):
    // output_i = scale_0 * x_i <+ or *> scale_1 * y_i;
    // Hence we have to create scales that will:
    // 1. Dequantize both values, by multiplying with (1.0 / scale_x_or_y)
    // 2. Quantize their result to output scale range, by multiplying with
    // (scale_z)
    // If we combine these two, we end up with following equation
    // output = scale_out * (1/scale_x * x <* or +> 1/scale_y * y)
    // Hence, to mimic such behaviour using provided interface,
    // For add operation the equation is equal to:
    // output = (scale_out / scale_x) * x + (scale_out / scale_y) * y
    //                <scale_0>                  <scale_1>
    // For mul operation on the other hand
    // output = (scale_out / scale_x) * x * (1.0 / scale_y) * y
    //                <scale_0>                 <scale_1>
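    // (Worked example, for illustration: with scale_x = 2, scale_y = 4 and
    //  scale_z = 8, binary_add gets scale_0 = 8/2 = 4 and scale_1 = 8/4 = 2,
    //  while binary_mul gets scale_0 = 8/2 = 4 and scale_1 = 1/4 = 0.25.)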
    float scale_0 = scale_z / scale_x;
    float scale_1 =
        op == dnnl::algorithm::binary_add ? scale_z / scale_y : 1.0 / scale_y;
    dnnl::primitive_attr attributes;
    attributes.set_scales(/* input_x_id = */ DNNL_ARG_SRC_0, /* mask = */ 0,
                          {scale_0});
    attributes.set_scales(/* input_y_id = */ DNNL_ARG_SRC_1, /* mask = */ 0,
                          {scale_1});
    return attributes;
  }
};

template <typename T>
class ReductionMKLDNNHandler
    : public platform::MKLDNNHandlerT<T, dnnl::reduction> {
 public:
  ReductionMKLDNNHandler(const dnnl::algorithm algo, const float p,
                         const float eps, const MKLDNNDeviceContext& dev_ctx,
                         const mkldnn::engine engine, platform::Place cpu_place,
                         const Tensor* x, const Tensor* y,
                         const std::string& uniq_name)
      : platform::MKLDNNHandlerT<T, dnnl::reduction>(
            dev_ctx, engine, cpu_place,
            platform::CreateKey(dev_ctx, framework::vectorize(x->dims()),
                                uniq_name,
                                (std::to_string(static_cast<int>(algo))))) {
    if (!this->isCached()) {
      PADDLE_ENFORCE_EQ(
          x->layout(), DataLayout::kMKLDNN,
          platform::errors::InvalidArgument("Wrong layout set for X tensor."));
      PADDLE_ENFORCE_NE(
          x->format(), MKLDNNMemoryFormat::undef,
          platform::errors::InvalidArgument("Wrong format set for X tensor."));

      const auto src_tz = framework::vectorize(x->dims());
      const auto dst_tz = framework::vectorize(y->dims());

      // oneDNN requires src and dst dimensionalities to match, so we need to
      // extend Y tensor dims with values of 1 (before and after pattern)
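      // Example: reducing x with dims [2, 3, 4] over axis 1 gives y with
      // dims [2, 4]; the loop below rebuilds it as dst_tz_ex = [2, 1, 4].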
      int j = 0;
      std::vector<int64_t> dst_tz_ex(src_tz.size(), 1);
      for (size_t i = 0; i < src_tz.size(); ++i) {
        dst_tz_ex[i] = (src_tz[i] != dst_tz[j]) ? 1 : dst_tz[j++];
      }

      const auto src_md = dnnl::memory::desc(
          src_tz, platform::MKLDNNGetDataType<T>(), x->format());
      const auto dst_md = memory::desc(
          dst_tz_ex, platform::MKLDNNGetDataType<T>(), x->format());

      this->AcquireForwardPrimitiveDescriptor(algo, src_md, dst_md, p, eps);
    }
  }
};

template <typename T>
class ActivationMKLDNNHandler
    : public MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                            mkldnn::eltwise_backward> {
 public:
  ActivationMKLDNNHandler(const std::vector<int64_t>& dims,
                          mkldnn::algorithm algorithm, float alpha, float beta,
                          const MKLDNNMemoryFormat fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
                          const std::string& unique_name, bool is_inplaced)

      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                 mkldnn::eltwise_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            is_inplaced
                ? platform::CreateKey(dev_ctx, dims, "a", algorithm,
                                      unique_name)
                : platform::CreateKey(dev_ctx, dims, "a", unique_name)) {
    auto md = mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);

    this->AcquireForwardPrimitiveDescriptor(mkldnn::prop_kind::forward_training,
                                            algorithm, md, alpha, beta);
  }

  ActivationMKLDNNHandler(const std::vector<int64_t>& dims,
                          mkldnn::algorithm algorithm, float alpha, float beta,
                          const MKLDNNMemoryFormat fmt,
                          const MKLDNNMemoryFormat diff_fmt,
                          const platform::MKLDNNDeviceContext& dev_ctx,
                          platform::Place cpu_place,
                          const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::eltwise_forward,
                                 mkldnn::eltwise_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dev_ctx, dims, "a", unique_name)) {
    auto diff_dst_md = platform::MKLDNNMemDesc(
        dims, platform::MKLDNNGetDataType<T>(), diff_fmt);
    auto src_md =
        platform::MKLDNNMemDesc(dims, platform::MKLDNNGetDataType<T>(), fmt);

    this->AcquireBackwardPrimitiveDescriptor(algorithm, diff_dst_md, src_md,
                                             alpha, beta);
  }

  std::shared_ptr<mkldnn::memory> AcquireBackwardSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(this->bwd_pd_->src_desc(),
                                            to_void_cast<T>(input_data),
                                            "@bwd-src_mem_p");
  }
};

template <typename T>
class LRNMKLDNNHandler
    : public MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward> {
 public:
  LRNMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                   const platform::MKLDNNDeviceContext& dev_ctx,
                   const mkldnn::engine mkldnn_engine,
                   platform::Place cpu_place, const Tensor* input,
                   const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward>(
            dev_ctx, mkldnn_engine, cpu_place,
            platform::CreateKey(dev_ctx, framework::vectorize(input->dims()),
                                unique_name)) {
    if (!this->isCached()) {
      const int n = ctx.Attr<int>("n");
      // MKL-DNN implements LRN in a caffe way:
      // http://caffe.berkeleyvision.org/tutorial/layers/lrn.html
      // Where sum of squares is divided by size of normalization window
      // this is not the case for PaddlePaddle LRN.
      // Hence we need to compensate for this difference by
      // multiplying alpha by the size of the window (n)
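      // (Illustration: oneDNN computes
      //    dst = src / (k + (alpha / n) * sum(src^2))^beta
      //  over the window, while Paddle's LRN uses alpha without the 1/n
      //  factor, so passing alpha * n reproduces Paddle's result.)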
      const float alpha = ctx.Attr<float>("alpha") * static_cast<float>(n);
      const float beta = ctx.Attr<float>("beta");
      const float k = ctx.Attr<float>("k");
      bool is_test = ctx.Attr<bool>("is_test");

      auto dims = paddle::framework::vectorize(input->dims());

      auto src_md = mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(),
                                         input->format());

      this->AcquireForwardPrimitiveDescriptor(
          is_test ? mkldnn::prop_kind::forward_inference
                  : mkldnn::prop_kind::forward_training,
          mkldnn::algorithm::lrn_across_channels, src_md, n, alpha, beta, k);
    }
  }

  LRNMKLDNNHandler(const std::vector<int64_t>& dims, const int n,
                   const float alpha, const float beta, const float k,
                   const MKLDNNMemoryFormat fmt,
                   const MKLDNNMemoryFormat diff_fmt,
                   const platform::MKLDNNDeviceContext& dev_ctx,
                   platform::Place cpu_place, const std::string& unique_name)

      : platform::MKLDNNHandlerT<T, mkldnn::lrn_forward, mkldnn::lrn_backward>(
            dev_ctx, dev_ctx.GetEngine(), cpu_place,
            platform::CreateKey(dev_ctx, dims, unique_name)) {
    auto src_md =
        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
    auto diff_md =
        mkldnn::memory::desc(dims, platform::MKLDNNGetDataType<T>(), diff_fmt);

    this->AcquireBackwardPrimitiveDescriptor(
        mkldnn::algorithm::lrn_across_channels, src_md, diff_md, n, alpha, beta,
        k);
  }

  std::shared_ptr<mkldnn::memory> AcquireWorkspaceMemory(
      framework::Tensor* workspace) {
    T* ptr = workspace->mutable_data<T>(
        this->place_, this->fwd_pd_->workspace_desc().get_size());
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->workspace_desc(),
                                            ptr, "@wrk_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireBackwardWorkspaceMemory(
      const framework::Tensor* workspace) {
    const T* workspace_data = workspace->data<T>();
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->workspace_desc(),
                                            to_void_cast<T>(workspace_data),
                                            "@bwd-wrk_mem_p");
  }
};

template <typename T>
class TransposeMKLDNNHandler : public MKLDNNHandler {
 public:
  TransposeMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                         std::vector<int>& axis,      // NOLINT
                         const platform::MKLDNNDeviceContext& dev_ctx,
                         mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        axis_(axis),
        logical_axis_(dims.size(), 0) {}

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const MKLDNNMemoryFormat& fmt, void* ptr) {
    auto local_key = key_ + "@user_src_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      // Make the memory descriptor using the input format, unless it
      // cannot be trusted (nchw); then make up the memory format manually
      for (size_t i = 0; i < logical_axis_.size(); ++i) {
        logical_axis_[i] = i;
      }

      auto src_md = fmt != MKLDNNMemoryFormat::nchw
                        ? platform::MKLDNNMemDesc(
                              dims_, platform::MKLDNNGetDataType<T>(), fmt)
                        : Axis2MemoryDesc(dims_, logical_axis_);
      mem_p = std::make_shared<mkldnn::memory>(src_md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output,
                                                   platform::Place place) {
    auto local_key = key_ + "@user_dst_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto dst_md = Axis2MemoryDesc(dims_, axis_);

      auto dst_data = output->mutable_data<T>(place, dst_md.get_size());

      mem_p = std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      auto dst_data = output->mutable_data<T>(place);
      mem_p->set_data_handle(dst_data);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::reorder> AcquireTranspose(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    auto prim_key = key_ + "@transpose_p";
    auto transpose_p =
        std::static_pointer_cast<mkldnn::reorder>(dev_ctx_.GetBlob(prim_key));
    if (transpose_p == nullptr) {
      transpose_p =
          std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
      dev_ctx_.SetBlob(prim_key, transpose_p);
    }
    return transpose_p;
  }

 protected:
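  // Axis2MemoryDesc builds a memory descriptor whose strides realize the
  // requested axis permutation. Example: for nchw_tz = {2, 3, 4, 5} and
  // axis = {0, 2, 3, 1} (NCHW -> NHWC) the computed strides are
  // {60, 1, 15, 3}.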
  mkldnn::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
                                       std::vector<int>& axis          // NOLINT
                                       ) {
    size_t ndims = axis.size();

    std::vector<int64_t> strides(ndims);
    unsigned int total_stride = 1;
    for (int i = ndims - 1; i >= 0; --i) {
      strides[axis[i]] = total_stride;
      total_stride *= nchw_tz[axis[i]];
    }
    mkldnn::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
                               strides);

    return mem_d;
  }

 private:
  std::vector<int64_t> dims_;
  std::vector<int> axis_;
  std::vector<int> logical_axis_;
};

class ReorderMKLDNNHandler : public MKLDNNHandler {
 public:
  ReorderMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                       framework::proto::VarType::Type vtype,
                       mkldnn::memory::data_type dtype,
                       const platform::MKLDNNDeviceContext& dev_ctx,
                       mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        dims_(dims),
        vtype_(vtype),
        dtype_(dtype) {}

  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
      const MKLDNNMemoryFormat& fmt, void* ptr) {
    return this->AcquireMemory(dims_, dtype_, fmt, ptr, "@user_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemory(
      framework::Tensor* output, const MKLDNNMemoryFormat& fmt,
      platform::Place place) {
    auto local_key = key_ + "@user_dst_mem_p";
    auto mem_p =
        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      auto dst_md = platform::MKLDNNMemDesc(dims_, dtype_, fmt);
      auto dst_data = output->mutable_data(place, vtype_, dst_md.get_size());

      mem_p = std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      // Even if the memory object exists, we may be using it for a different tensor
      auto dst_data =
          output->mutable_data(place, vtype_, mem_p->get_desc().get_size());
      mem_p->set_data_handle(dst_data);
    }
    return mem_p;
  }

  std::shared_ptr<mkldnn::reorder> AcquireReorder(
      std::shared_ptr<mkldnn::memory> dst_memory_p,
      std::shared_ptr<mkldnn::memory> src_memory_p) {
    auto prim_key = key_ + "@reorder_p";
    auto reorder_p =
        std::static_pointer_cast<mkldnn::reorder>(dev_ctx_.GetBlob(prim_key));
    if (reorder_p == nullptr) {
      reorder_p =
          std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
      dev_ctx_.SetBlob(prim_key, reorder_p);
    }
    return reorder_p;
  }

 private:
  std::vector<int64_t> dims_;
  framework::proto::VarType::Type vtype_;
  mkldnn::memory::data_type dtype_;
};

template <typename T>
struct convolutional_algorithm;

template <>
struct convolutional_algorithm<mkldnn::convolution_forward> {
  static constexpr mkldnn::algorithm T = mkldnn::algorithm::convolution_direct;
};

template <>
struct convolutional_algorithm<mkldnn::deconvolution_forward> {
  static constexpr mkldnn::algorithm T =
      mkldnn::algorithm::deconvolution_direct;
};

template <class forward_t, class backward_data_t, class backward_weights_t>
class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
 public:
  ConvMKLDNNTemplateHandler(const platform::MKLDNNDeviceContext& dev_ctx,
                            mkldnn::engine engine, const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {}

  // TODO(jczaja): remove after conv int8 is adapted
  ConvMKLDNNTemplateHandler(
      std::shared_ptr<typename forward_t::primitive_desc> conv_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key) {
    conv_pd_ = conv_pd;
  }

  ConvMKLDNNTemplateHandler(
      std::shared_ptr<typename forward_t::primitive_desc> conv_pd,
      std::shared_ptr<typename backward_data_t::primitive_desc>
          conv_bwd_data_pd,
      std::shared_ptr<typename backward_weights_t::primitive_desc>
          conv_bwd_weights_pd,
      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
      const std::string& base_key)
      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
        conv_pd_(conv_pd),
        conv_bwd_weights_pd_(conv_bwd_weights_pd),
        conv_bwd_data_pd_(conv_bwd_data_pd) {
    // If we are in a Grad operator then update the key with a BWD suffix to
    // distinguish it from FWD memory primitives
    key_ += "-BWD";
  }

  size_t GetDstMemorySize() const { return conv_pd_->dst_desc().get_size(); }

  MKLDNNMemoryFormat GetDstFormat() const {
    return paddle::platform::GetMKLDNNFormat(conv_pd_->dst_desc());
  }

  size_t GetDiffWeightsMemorySize() const {
    return conv_bwd_weights_pd_->diff_weights_desc().get_size();
  }

  size_t GetDiffSourceMemorySize() const {
    return conv_bwd_data_pd_->diff_src_desc().get_size();
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromWeightsPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto src_pd = conv_bwd_weights_pd_->src_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(src_pd, user_pd, user_memory_p,
                               "@weights-src_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromWeightsPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto diff_dst_pd = conv_bwd_weights_pd_->diff_dst_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
                               "@weights-diff_dst_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffWeightsMemoryFromWeightsPrimitive(
      void* ptr) {
    return this->AcquireMemoryFromPrimitive(
        conv_bwd_weights_pd_->diff_weights_desc(), ptr, "@diff_weights_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffWeightsMemoryFromWeightsPrimitive(
      void) {
    return this->AcquireMemoryFromPrimitive(
        conv_bwd_weights_pd_->diff_weights_desc(), "@diff_weights_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffDstMemoryFromDataPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto diff_dst_pd = conv_bwd_data_pd_->diff_dst_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p,
                               "@data-diff_dst_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromDataPrimitive(
      const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto weights_pd = conv_bwd_data_pd_->weights_desc();
    auto user_pd = user_weights_memory_p->get_desc();
    return this->AcquireMemory(weights_pd, user_pd, user_weights_memory_p,
                               "@data-weights_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireResidualDataMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_residual_data_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromResidualDataMemory(
      const std::shared_ptr<mkldnn::memory>& user_residual_memory_p,
      void* dst_ptr,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    return this->AcquireMemory(user_residual_memory_p,
                               this->AcquireDstMemoryFromPrimitive(dst_ptr),
                               "@residual_data_mem_p", pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireDiffSrcMemoryFromDataPrimitive(
      void* ptr) {
    return this->AcquireMemoryFromPrimitive(conv_bwd_data_pd_->diff_src_desc(),
                                            ptr, "@diff_src_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireDstMemoryFromPrimitive(void* ptr) {
    return this->AcquireMemoryFromPrimitive(conv_pd_->dst_desc(), ptr,
                                            "@dst_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireSrcMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_memory_p,
      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
    auto src_pd = conv_pd_->src_desc();
    auto user_pd = user_memory_p->get_desc();
    return this->AcquireMemory(src_pd, user_pd, user_memory_p, "@src_mem_p",
                               pipeline);
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemory(
      const mkldnn::memory::desc& md, void* ptr,
      user_function custom_func = {}) {
    return this->AcquireMemory(md, ptr, "@user_weights_mem_p", custom_func);
  }

  std::shared_ptr<mkldnn::memory> AcquireBiasMemory(
      const mkldnn::memory::desc& md, void* ptr) {
    return this->AcquireMemory(md, ptr, "@user_bias_mem_p");
  }

  std::shared_ptr<mkldnn::memory> AcquireWeightsMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_weights_memory_p,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f}, int mask = 0) {
    auto user_weights_pd = user_weights_memory_p->get_desc();
    auto weights_pd = conv_pd_->weights_desc();
    return this->AcquireMemory(
        weights_pd, user_weights_pd, user_weights_memory_p, "@weights_mem_p",
        pipeline, is_persistent, is_INT8, scale_data, mask);
  }

  std::shared_ptr<mkldnn::memory> AcquireBiasMemoryFromPrimitive(
      const std::shared_ptr<mkldnn::memory> user_bias_memory_p,
      std::vector<mkldnn::primitive>& pipeline,  // NOLINT
      bool is_persistent = false, bool is_INT8 = false,
      std::vector<float> scale_data = {1.0f},
      int mask = 0) {  // NOLINT
    auto user_bias_pd = user_bias_memory_p->get_desc();
    auto bias_pd = conv_pd_->bias_desc();
    return this->AcquireMemory(bias_pd, user_bias_pd, user_bias_memory_p,
                               "@bias_mem_p", pipeline, is_persistent, is_INT8,
                               scale_data, mask);
  }

  mkldnn::primitive_attr CreatePostOps(
      std::string fuse_activation, float fuse_alpha, float fuse_beta,
      bool fuse_residual_conn, const std::vector<float> output_shift_scale = {},
      float sum_scale = 1.0f) const {
    mkldnn::primitive_attr conv_attr;
    mkldnn::post_ops post_operations;
    if (output_shift_scale.size() > 0) {
      int mask = output_shift_scale.size() > 1 ? 1 << 1 : 0;
      conv_attr.set_output_scales(mask, output_shift_scale);
    }
    // Fusion with Elementwise layer relies on adding a sum post-operation with
    // the scale parameter. It is assumed that when fuse_residual_connection is
    // true, the output tensor contains the data coming from residual
    // connection. The result of this post_op is:
    // Output = scale * Output + Conv_Out.
    if (fuse_residual_conn) {
      post_operations.append_sum(sum_scale);
    }
    // Fusion with ReLU layer is executed through the PostOps feature. Create a
    // PostOps object and configure it to execute an eltwise relu operation.
    if (fuse_activation == "relu" || fuse_activation == "leaky_relu") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "relu6") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale,
                                     mkldnn::algorithm::eltwise_bounded_relu,
                                     fuse_alpha, fuse_beta);
    } else if (fuse_activation == "swish") {
      constexpr float scale = 1.0f;
      post_operations.append_eltwise(scale, mkldnn::algorithm::eltwise_swish,
                                     fuse_alpha, fuse_beta);
    }
    conv_attr.set_post_ops(post_operations);
    return conv_attr;
  }

  std::shared_ptr<typename forward_t::primitive_desc>
  AcquireConvolutionPrimitiveDescriptor(
      const mkldnn::memory::desc& src, const mkldnn::memory::desc& weights,
      boost::optional<const mkldnn::memory::desc&> bias,
      const mkldnn::memory::desc& dst, const std::vector<int64_t>& strides,
      const std::vector<int64_t>& dilations,
      const std::vector<int64_t>& paddings, const mkldnn::engine& engine,
      const std::string& fuse_activation, float fuse_alpha, float fuse_beta,
      const bool fuse_residual_conn, mkldnn::prop_kind fwd_prop_kind,
      const std::vector<float> output_shift_scale = {},
      const float sum_scale = 1.0f) {
    // Conv PD has to be passed to the Grad op that
    // may be executed by a different thread, hence
    // for that one we use a key that does not contain the TID
    const std::string key_conv_pd = key_common_ + "@conv_pd";

    conv_pd_ = std::static_pointer_cast<typename forward_t::primitive_desc>(
        dev_ctx_.GetBlob(key_conv_pd));

    if (conv_pd_ == nullptr) {
      static std::mutex acquire_barrier;
      std::lock_guard<std::mutex> block_threads_until_finish_this_job(
          acquire_barrier);

      conv_pd_ = std::static_pointer_cast<typename forward_t::primitive_desc>(
          dev_ctx_.GetBlob(key_conv_pd));
      if (conv_pd_ == nullptr) {
        mkldnn::memory::dims stride_dims = strides;
        mkldnn::memory::dims dilations_dims = dilations;
        auto mkldnn_paddings = ToMkldnnPadding(paddings);

        auto conv_desc =
            bias ? typename forward_t::desc(
                       fwd_prop_kind, convolutional_algorithm<forward_t>::T,
                       src, weights, *bias, dst, stride_dims, dilations_dims,
                       mkldnn_paddings[0], mkldnn_paddings[1])
                 : typename forward_t::desc(
                       fwd_prop_kind, convolutional_algorithm<forward_t>::T,
                       src, weights, dst, stride_dims, dilations_dims,
                       mkldnn_paddings[0], mkldnn_paddings[1]);

        mkldnn::primitive_attr conv_attr =
            CreatePostOps(fuse_activation, fuse_alpha, fuse_beta,
                          fuse_residual_conn, output_shift_scale, sum_scale);

        conv_pd_.reset(new typename forward_t::primitive_desc(
            conv_desc, conv_attr, engine));
        // Save conv_pd/src_memory/weights_memory for backward pass
        dev_ctx_.SetBlob(key_conv_pd, conv_pd_);
      }
    }

    return conv_pd_;
  }

  std::shared_ptr<forward_t> AcquireConvolution() {
    auto prim_key = key_ + "@conv_p";
    auto conv_p =
        std::static_pointer_cast<forward_t>(dev_ctx_.GetBlob(prim_key));
    if (conv_p == nullptr) {
      conv_p = std::make_shared<forward_t>(*conv_pd_);

      dev_ctx_.SetBlob(prim_key, conv_p);
    }
    return conv_p;
  }

  std::shared_ptr<backward_weights_t> AcquireConvolutionBackwardWeights() {
    auto prim_key = key_ + "@conv_bwd_weights_p";
    auto conv_bwd_weights_p = std::static_pointer_cast<backward_weights_t>(
        dev_ctx_.GetBlob(prim_key));
    if (conv_bwd_weights_p == nullptr) {
      // create backward conv primitive for weights
      conv_bwd_weights_p =
          std::make_shared<backward_weights_t>(*conv_bwd_weights_pd_);
      dev_ctx_.SetBlob(prim_key, conv_bwd_weights_p);
    }
    return conv_bwd_weights_p;
  }

  std::shared_ptr<backward_data_t> AcquireConvolutionBackwardData() {
    auto prim_key = key_ + "@conv_bwd_data_p";
    auto conv_bwd_data_p =
        std::static_pointer_cast<backward_data_t>(dev_ctx_.GetBlob(prim_key));
    if (conv_bwd_data_p == nullptr) {
      conv_bwd_data_p = std::make_shared<backward_data_t>(*conv_bwd_data_pd_);
      dev_ctx_.SetBlob(prim_key, conv_bwd_data_p);
    }
    return conv_bwd_data_p;
  }

 private:
  std::shared_ptr<typename forward_t::primitive_desc> conv_pd_;
  std::shared_ptr<typename backward_weights_t::primitive_desc>
      conv_bwd_weights_pd_;
  std::shared_ptr<typename backward_data_t::primitive_desc> conv_bwd_data_pd_;
};

using ConvMKLDNNHandler =
    ConvMKLDNNTemplateHandler<mkldnn::convolution_forward,
                              mkldnn::convolution_backward_data,
                              mkldnn::convolution_backward_weights>;

using ConvTransposeMKLDNNHandler =
    ConvMKLDNNTemplateHandler<mkldnn::deconvolution_forward,
                              mkldnn::deconvolution_backward_data,
                              mkldnn::deconvolution_backward_weights>;

template <typename T>
static std::shared_ptr<mkldnn::memory> SetDstMemory(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const std::shared_ptr<ConvMKLDNNHandler>& handler) {
  T* output_data =
      output->mutable_data<T>(ctx.GetPlace(), handler->GetDstMemorySize());
  std::shared_ptr<mkldnn::memory> dst_memory_p =
      handler->AcquireDstMemoryFromPrimitive(to_void_cast<T>(output_data));
  return dst_memory_p;
}

template <typename T>
static std::shared_ptr<mkldnn::memory> SetDstMemory(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const framework::Tensor* residual_param,
    const mkldnn::memory::desc& user_residual_md,
    const std::shared_ptr<ConvMKLDNNHandler>& handler,
    std::vector<mkldnn::primitive>* pipeline) {
  const T* residual_param_data = residual_param->data<T>();
  PADDLE_ENFORCE_NOT_NULL(
      residual_param_data,
      platform::errors::PreconditionNotMet("Residual parameter is required for "
                                           "the DNNL conv+elementwise_add "
                                           "fusion, but now it is missing."));
  std::shared_ptr<mkldnn::memory> user_residual_memory_p =
      handler->AcquireResidualDataMemory(user_residual_md,
                                         to_void_cast<T>(residual_param_data));
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  std::shared_ptr<mkldnn::memory> dst_memory_p =
      handler->AcquireDstMemoryFromResidualDataMemory(
          user_residual_memory_p, to_void_cast<T>(output_data), *pipeline);
  return dst_memory_p;
}

template <typename T>
static void SetDstMemoryHandler(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    const std::shared_ptr<ConvMKLDNNHandler>& handler,
    std::shared_ptr<mkldnn::memory> dst_memory_p) {
  T* output_data =
      output->mutable_data<T>(ctx.GetPlace(), handler->GetDstMemorySize());
  dst_memory_p->set_data_handle(to_void_cast<T>(output_data));
}

template <typename T>
static void SetDstMemoryQuantized(
    const framework::ExecutionContext& ctx, framework::Tensor* output,
    std::vector<int64_t> dst_tz, const mkldnn::engine& engine,
    std::shared_ptr<mkldnn::memory::desc>& dst_md,  // NOLINT
    std::shared_ptr<mkldnn::memory>& dst_memory,    // NOLINT
    MKLDNNMemoryFormat output_format) {
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  const size_t dst_dims = dst_tz.size();
  MKLDNNMemoryFormat dst_fmt;
  PADDLE_ENFORCE_LE(dst_dims, 5, platform::errors::InvalidArgument(
                                     "Dst memory for quantization can not have "
                                     "dims > 5. But received dst_dims is %d.",
                                     dst_dims));
  dst_fmt = platform::MKLDNNFormatForSize(dst_dims, output_format);

  auto tmp_dst_md = platform::MKLDNNMemDesc(
      {dst_tz}, paddle::framework::ToMKLDNNDataType(
                    framework::DataTypeTrait<T>::DataType()),
      dst_fmt);
  dst_md.reset(new mkldnn::memory::desc(tmp_dst_md));
  dst_memory.reset(
      new mkldnn::memory(*dst_md, engine, to_void_cast<T>(output_data)));
}
}  // namespace platform
}  // namespace paddle