/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <algorithm>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/fluid/platform/mkldnn_helper.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace platform {

using framework::DataLayout;
using framework::Tensor;
using user_function = std::function<std::shared_ptr<float>(const float*)>;
using memory = dnnl::memory;

template <typename T,
          typename TForward,
          typename TBackward = mkldnn_dummy_primitive,
          typename TBackward_params = mkldnn_dummy_primitive>
class MKLDNNHandlerNoCachingT {
 public:
  MKLDNNHandlerNoCachingT(dnnl::engine engine, platform::Place cpu_place)
      : engine_(engine), place_(cpu_place), fwd_pd_(nullptr), bwd_pd_(nullptr) {
    platform::MKLDNNDeviceContext::tls().log_lib_version();
  }

  std::shared_ptr<TForward> AcquireForwardPrimitive() {
    return std::make_shared<TForward>(*fwd_pd_);
  }

  std::shared_ptr<TBackward> AcquireBackwardPrimitive() {
    return std::make_shared<TBackward>(*bwd_pd_);
  }

  std::shared_ptr<TBackward_params> AcquireBackwardWeightsPrimitive() {
    PADDLE_ENFORCE_NOT_NULL(
        bwd_w_pd_,
        platform::errors::Unavailable("BWD_PD should be set when "
                                      "getting BWD prim ."));
    return std::make_shared<TBackward_params>(*bwd_w_pd_);
  }

  std::shared_ptr<dnnl::memory> AcquireSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(fwd_pd_->src_desc(),
                                            to_void_cast<T>(input_data));
  }

  template <typename T_out = T>
  std::shared_ptr<dnnl::memory> AcquireDstMemory(framework::Tensor* output) {
    T_out* ptr =
        output->mutable_data<T_out>(place_, fwd_pd_->dst_desc().get_size());
    return this->AcquireMemoryFromPrimitive(fwd_pd_->dst_desc(), ptr);
  }

  template <typename T_out = T>
  std::shared_ptr<dnnl::memory> AcquireDstMemory(void) {
    return this->AcquireMemoryFromPrimitive(fwd_pd_->dst_desc());
  }

  template <typename T_out = T>
  std::shared_ptr<dnnl::memory> AcquireDstMemory(
      const framework::Tensor* output) {
    const T_out* output_data = output->data<T_out>();
    return this->AcquireMemoryFromPrimitive(bwd_pd_->dst_desc(),
                                            to_void_cast<T_out>(output_data));
  }

  std::shared_ptr<dnnl::memory> AcquireDiffDstMemory(
      const framework::Tensor* diffdst) {
    const T* ptr = diffdst->data<T>();
    return this->AcquireMemoryFromPrimitive(bwd_pd_->diff_dst_desc(),
                                            to_void_cast<T>(ptr));
  }

  std::shared_ptr<dnnl::memory> AcquireDiffSrcMemory(
      framework::Tensor* diffsrc) {
    T* ptr =
        diffsrc->mutable_data<T>(place_, bwd_pd_->diff_src_desc().get_size());
    return this->AcquireMemoryFromPrimitive(bwd_pd_->diff_src_desc(), ptr);
  }

  // Buffer of given Tensor is used for oneDNN computation
  std::shared_ptr<dnnl::memory> AcquireDiffWeightsMemory(
      framework::Tensor* diff_weights) {
    PADDLE_ENFORCE_NOT_NULL(
        bwd_w_pd_,
        platform::errors::Unavailable(
            "BWD_W_PD should be set when getting BWD grad of weights."));
    T* ptr = diff_weights->mutable_data<T>(
        place_, bwd_w_pd_->diff_weights_desc().get_size());
    return this->AcquireMemoryFromPrimitive(bwd_w_pd_->diff_weights_desc(),
                                            ptr);
  }

  // Buffer is allocated by oneDNN to store computation results
  std::shared_ptr<dnnl::memory> AcquireDiffWeightsMemory(void) {
    PADDLE_ENFORCE_NOT_NULL(
        bwd_w_pd_,
        platform::errors::Unavailable(
            "BWD_W_PD should be set when getting BWD grad of weights."));
    return this->AcquireMemoryFromPrimitive(bwd_w_pd_->diff_weights_desc());
  }

 protected:
  // If your primitive descriptor requires attributes, pass them as the
  // first argument and the parameters of the descriptor constructor in the
  // following arguments. Otherwise, all arguments will be forwarded to the
  // descriptor constructor, including the first one.
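  //
  // Illustrative sketch (hypothetical names; it mirrors how derived handlers
  // in this file, e.g. BinaryMKLDNNHandler, invoke it):
  //   dnnl::primitive_attr attrs;  // post-ops / output scales set beforehand
  //   this->AcquireForwardPrimitiveDescriptor(attrs, algo, src0_md, src1_md,
  //                                           dst_md);
  // Without a leading dnnl::primitive_attr, every argument is forwarded
  // directly to TForward::desc.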
  template <typename Arg, typename... Args>
  void AcquireForwardPrimitiveDescriptor(Arg&& first_arg, Args&&... args) {
    CreateForwardPrimitiveDescriptor(first_arg, std::forward<Args>(args)...);
  }

  // Using sfinae to specialise variadic function. Workaround for not having
  // if constexpr in C++ 11.
  template <class First, class... Args>
  typename std::enable_if<std::is_same<typename std::decay<First>::type,
                                       dnnl::primitive_attr>::value>::type
  CreateForwardPrimitiveDescriptor(First&& first, Args&&... args) {
    auto fwd_desc = typename TForward::desc(std::forward<Args>(args)...);
    fwd_pd_ = std::make_shared<typename TForward::primitive_desc>(
        fwd_desc, first, engine_);
  }

  template <class First, class... Args>
  typename std::enable_if<!std::is_same<typename std::decay<First>::type,
                                        dnnl::primitive_attr>::value>::type
  CreateForwardPrimitiveDescriptor(First&& first, Args&&... args) {
    auto fwd_desc = typename TForward::desc(std::forward<First>(first),
                                            std::forward<Args>(args)...);
    fwd_pd_ =
        std::make_shared<typename TForward::primitive_desc>(fwd_desc, engine_);
  }

  template <typename... Args>
  void AcquireBackwardPrimitiveDescriptor(Args&&... args) {
    // fwd_pd_ is set during grad by calling
    // AcquireForwardPrimitiveDescriptor
    PADDLE_ENFORCE_NOT_NULL(fwd_pd_,
                            platform::errors::Unavailable(
                                "Get MKLDNN Forward primitive %s failed."));
    auto bwd_desc = typename TBackward::desc(std::forward<Args>(args)...);
    bwd_pd_ = std::make_shared<typename TBackward::primitive_desc>(
        bwd_desc, engine_, *fwd_pd_);
  }

  template <typename... Args>
  void AcquireBackwardWeightsPrimitiveDescriptor(Args&&... args) {
    // fwd_pd_ is set during grad by calling
    // AcquireForwardPrimitiveDescriptor
    PADDLE_ENFORCE_NOT_NULL(fwd_pd_,
                            platform::errors::Unavailable(
                                "Get MKLDNN Forward primitive %s failed."));
    auto bwd_desc =
        typename TBackward_params::desc(std::forward<Args>(args)...);
    bwd_w_pd_ = std::make_shared<typename TBackward_params::primitive_desc>(
        bwd_desc, engine_, *fwd_pd_);
  }

  std::shared_ptr<dnnl::memory> AcquireMemoryFromPrimitive(
      dnnl::memory::desc md, void* ptr) {
    return std::make_shared<dnnl::memory>(md, engine_, ptr);
  }

  std::shared_ptr<dnnl::memory> AcquireMemoryFromPrimitive(
      dnnl::memory::desc md) {
    return std::make_shared<dnnl::memory>(md, engine_);
  }

  void AcquireReorder(const std::shared_ptr<dnnl::memory>& user_memory_p,
                      const std::shared_ptr<dnnl::memory>& target_memory_p) {
    auto reorder_p =
        std::make_shared<dnnl::reorder>(*user_memory_p, *target_memory_p);

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    platform::RecordEvent record_reorder("int_reorder",
                                         platform::TracerEventType::UserDefined,
                                         2,
                                         platform::EventRole::kUniqueOp);
    reorder_p->execute(
        astream,
        {{DNNL_ARG_FROM, *user_memory_p}, {DNNL_ARG_TO, *target_memory_p}});
    astream.wait();
  }

  template <typename F = T>
  std::shared_ptr<dnnl::memory> AcquireMemoryWithReorder(
      const dnnl::memory::desc& user_md,
      const dnnl::memory::desc& target_md,
      void* ptr,
      bool is_persistent = false,
      std::function<std::shared_ptr<F>(const F*)> custom_reorder_func = {}) {
    std::shared_ptr<dnnl::memory> target_memory_p;
    if (custom_reorder_func) {
      auto reordered_data =
          custom_reorder_func(reinterpret_cast<const F*>(ptr));
      ptr = reinterpret_cast<void*>(reordered_data.get());
    }
    auto user_memory_p = std::make_shared<dnnl::memory>(user_md, engine_, ptr);
    if (user_md != target_md) {
      target_memory_p = std::make_shared<dnnl::memory>(target_md, engine_);
      auto reorder_p =
          std::make_shared<dnnl::reorder>(*user_memory_p, *target_memory_p);

      auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
      platform::RecordEvent record_reorder(
          "int_reorder",
          platform::TracerEventType::UserDefined,
          2,
          platform::EventRole::kUniqueOp);
      reorder_p->execute(
          astream,
          {{DNNL_ARG_FROM, *user_memory_p}, {DNNL_ARG_TO, *target_memory_p}});
      astream.wait();
    } else {
      target_memory_p = user_memory_p;
    }
    return target_memory_p;
  }

  dnnl::engine engine_;
  platform::Place place_;
  std::shared_ptr<typename TForward::primitive_desc> fwd_pd_;
  std::shared_ptr<typename TBackward::primitive_desc> bwd_pd_;
  std::shared_ptr<typename TBackward_params::primitive_desc> bwd_w_pd_;
};

template <typename T,
          typename TForward,
          typename TBackward = mkldnn_dummy_primitive,
          typename TBackward_params = mkldnn_dummy_primitive>
class MKLDNNHandlerT {
 public:
  MKLDNNHandlerT(const MKLDNNDeviceContext& dev_ctx,
                 dnnl::engine engine,
                 platform::Place cpu_place,
                 const std::string& base_key)
      : dev_ctx_(dev_ctx),
        engine_(engine),
        place_(cpu_place),
        key_common_(base_key),
        key_(platform::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, base_key)),
        fwd_pd_(nullptr),
        bwd_pd_(nullptr) {
    platform::MKLDNNDeviceContext::tls().log_lib_version();
  }

  std::shared_ptr<TForward> AcquireForwardPrimitive() {
    const std::string key_p = key_ + "@fwd_p";
    auto forward_p =
        std::static_pointer_cast<TForward>(dev_ctx_.GetBlob(key_p));
    if (forward_p == nullptr) {
      forward_p = std::make_shared<TForward>(*fwd_pd_);
      dev_ctx_.SetBlob(key_p, forward_p);
    }
    return forward_p;
  }

  std::shared_ptr<TBackward> AcquireBackwardPrimitive() {
    const std::string key_p = key_ + "@bwd_p";
    auto backward_p =
        std::static_pointer_cast<TBackward>(dev_ctx_.GetBlob(key_p));
    if (backward_p == nullptr) {
      backward_p = std::make_shared<TBackward>(*bwd_pd_);
      dev_ctx_.SetBlob(key_p, backward_p);
    }
    return backward_p;
  }

  std::shared_ptr<TBackward_params> AcquireBackwardWeightsPrimitive() {
    const std::string key_p = key_ + "@bwd_w_p";
    auto backward_p =
        std::static_pointer_cast<TBackward_params>(dev_ctx_.GetBlob(key_p));
    if (backward_p == nullptr) {
      PADDLE_ENFORCE_NOT_NULL(
          bwd_w_pd_,
          platform::errors::Unavailable("BWD_PD should be set when "
                                        "getting BWD prim witk key: %s .",
                                        key_p));
      backward_p = std::make_shared<TBackward_params>(*bwd_w_pd_);
      dev_ctx_.SetBlob(key_p, backward_p);
    }
    return backward_p;
  }

  std::shared_ptr<dnnl::memory> AcquireSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(
        fwd_pd_->src_desc(), to_void_cast<T>(input_data), "@src_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<dnnl::memory> AcquireDstMemory(framework::Tensor* output) {
    T_out* ptr =
        output->mutable_data<T_out>(place_, fwd_pd_->dst_desc().get_size());
    return this->AcquireMemoryFromPrimitive(
        fwd_pd_->dst_desc(), ptr, "@dst_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<dnnl::memory> AcquireDstMemory(void) {
    return this->AcquireMemoryFromPrimitive(fwd_pd_->dst_desc(), "@dstt_mem_p");
  }

  template <typename T_out = T>
  std::shared_ptr<dnnl::memory> AcquireDstMemory(
      const framework::Tensor* output) {
    const T_out* output_data = output->data<T_out>();
    return this->AcquireMemoryFromPrimitive(bwd_pd_->dst_desc(),
                                            to_void_cast<T_out>(output_data),
                                            "@bwd-dst_mem_p");
  }

  std::shared_ptr<dnnl::memory> AcquireDiffDstMemory(
      const framework::Tensor* diffdst) {
    const T* ptr = diffdst->data<T>();
    return this->AcquireMemoryFromPrimitive(
        bwd_pd_->diff_dst_desc(), to_void_cast<T>(ptr), "@diff_dst_mem_p");
  }

  std::shared_ptr<dnnl::memory> AcquireDiffSrcMemory(
      framework::Tensor* diffsrc) {
    T* ptr =
        diffsrc->mutable_data<T>(place_, bwd_pd_->diff_src_desc().get_size());
    return this->AcquireMemoryFromPrimitive(
        bwd_pd_->diff_src_desc(), ptr, "@diff_src_mem_p");
  }

  // Buffer of given Tensor is used for oneDNN computation
  std::shared_ptr<dnnl::memory> AcquireDiffWeightsMemory(
      framework::Tensor* diff_weights) {
    PADDLE_ENFORCE_NOT_NULL(
        bwd_w_pd_,
        platform::errors::Unavailable(
            "BWD_W_PD should be set when getting BWD grad of weights."));
    T* ptr = diff_weights->mutable_data<T>(
        place_, bwd_w_pd_->diff_weights_desc().get_size());
    return this->AcquireMemoryFromPrimitive(
        bwd_w_pd_->diff_weights_desc(), ptr, "@diff_wei_mem_p");
  }

  // Buffer is allocated by oneDNN to store computation results
  std::shared_ptr<dnnl::memory> AcquireDiffWeightsMemory(void) {
    PADDLE_ENFORCE_NOT_NULL(
        bwd_w_pd_,
        platform::errors::Unavailable(
            "BWD_W_PD should be set when getting BWD grad of weights."));
    return this->AcquireMemoryFromPrimitive(bwd_w_pd_->diff_weights_desc(),
                                            "@diff_wei_mem_p");
  }

 protected:
  bool isCached() {
    const std::string key_pd = key_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));

    return (fwd_pd_ != nullptr);
  }

  bool isBwdCached() {
    const std::string key_pd = key_ + "@bwd_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));

    if (bwd_pd_ == nullptr) {
      return false;
    } else {
      if (std::is_same<TBackward_params, mkldnn_dummy_primitive>::value ==
          false) {
        const std::string key_bw_w_pd = key_ + "@bwd_w_pd";
        bwd_w_pd_ =
            std::static_pointer_cast<typename TBackward_params::primitive_desc>(
                dev_ctx_.GetBlob(key_bw_w_pd));
      }

      // Even when BWD is cached, we still need to get the FWD PD
      const std::string key_fpd = key_ + "@fwd_pd";
      fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
          dev_ctx_.GetBlob(key_fpd));
      PADDLE_ENFORCE_NOT_NULL(
          fwd_pd_,
          platform::errors::Unavailable(
              "Error: FWD PD should be set when BWD PD is cached."));
      return true;
    }
  }

  // If your primitive descriptor requires attributes, pass them as the
  // first argument and the parameters of the descriptor constructor in the
  // following arguments. Otherwise, all arguments will be forwarded to the
  // descriptor constructor, including the first one.
  template <typename Arg, typename... Args>
  void AcquireForwardPrimitiveDescriptor(Arg&& first_arg, Args&&... args) {
    // This is used so that the FWD PD can be recreated in the BWD pass,
    // so we do not need to pass the FWD PD to BWD explicitly
    const std::string key_pd = key_ + "@fwd_pd";
    fwd_pd_ = std::static_pointer_cast<typename TForward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (fwd_pd_ == nullptr) {
      CreateForwardPrimitiveDescriptor(first_arg, std::forward<Args>(args)...);
      dev_ctx_.SetBlob(key_pd, fwd_pd_);
    }
  }

  // Using sfinae to specialise variadic function. Workaround for not having
  // if constexpr in C++ 11.
  template <class First, class... Args>
  typename std::enable_if<std::is_same<typename std::decay<First>::type,
                                       dnnl::primitive_attr>::value>::type
  CreateForwardPrimitiveDescriptor(First&& first, Args&&... args) {
    auto fwd_desc = typename TForward::desc(std::forward<Args>(args)...);
    fwd_pd_ = std::make_shared<typename TForward::primitive_desc>(
        fwd_desc, first, engine_);
  }

  template <class First, class... Args>
  typename std::enable_if<!std::is_same<typename std::decay<First>::type,
                                        dnnl::primitive_attr>::value>::type
  CreateForwardPrimitiveDescriptor(First&& first, Args&&... args) {
    auto fwd_desc = typename TForward::desc(std::forward<First>(first),
                                            std::forward<Args>(args)...);
    fwd_pd_ =
        std::make_shared<typename TForward::primitive_desc>(fwd_desc, engine_);
  }

  template <typename... Args>
  void AcquireBackwardPrimitiveDescriptor(Args&&... args) {
    // fwd_pd_ is set during grad by calling
    // AcquireForwardPrimitiveDescriptor
    PADDLE_ENFORCE_NOT_NULL(
        fwd_pd_,
        platform::errors::Unavailable("Get MKLDNN Forward primitive %s failed.",
                                      key_ + "@fwd_pd"));
    const std::string key_pd = key_ + "@bwd_pd";
    bwd_pd_ = std::static_pointer_cast<typename TBackward::primitive_desc>(
        dev_ctx_.GetBlob(key_pd));
    if (bwd_pd_ == nullptr) {
      auto bwd_desc = typename TBackward::desc(std::forward<Args>(args)...);
      bwd_pd_ = std::make_shared<typename TBackward::primitive_desc>(
          bwd_desc, engine_, *fwd_pd_);
      dev_ctx_.SetBlob(key_pd, bwd_pd_);
    }
  }

  template <typename... Args>
  void AcquireBackwardWeightsPrimitiveDescriptor(Args&&... args) {
    // fwd_pd_ is set during grad by calling
    // AcquireForwardPrimitiveDescriptor
    PADDLE_ENFORCE_NOT_NULL(
        fwd_pd_,
        platform::errors::Unavailable("Get MKLDNN Forward primitive %s failed.",
                                      key_ + "@fwd_pd"));
    const std::string key_pd = key_ + "@bwd_w_pd";
    bwd_w_pd_ =
        std::static_pointer_cast<typename TBackward_params::primitive_desc>(
            dev_ctx_.GetBlob(key_pd));
    if (bwd_w_pd_ == nullptr) {
      auto bwd_desc =
          typename TBackward_params::desc(std::forward<Args>(args)...);
      bwd_w_pd_ = std::make_shared<typename TBackward_params::primitive_desc>(
          bwd_desc, engine_, *fwd_pd_);
      dev_ctx_.SetBlob(key_pd, bwd_w_pd_);
    }
  }

  std::shared_ptr<dnnl::memory> AcquireMemoryFromPrimitive(
      const std::string& suffix) {
    return std::static_pointer_cast<dnnl::memory>(
        dev_ctx_.GetBlob(key_ + suffix));
  }

  std::shared_ptr<dnnl::memory> AcquireMemoryFromPrimitive(
      dnnl::memory::desc md, void* ptr, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<dnnl::memory>(md, engine_, ptr);
      dev_ctx_.SetBlob(local_key, mem_p);
    } else {
      mem_p->set_data_handle(ptr);
    }
    return mem_p;
  }

  std::shared_ptr<dnnl::memory> AcquireMemoryFromPrimitive(
      dnnl::memory::desc md, const std::string& suffix) {
    const auto local_key = key_ + suffix;
    auto mem_p =
        std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(local_key));
    if (mem_p == nullptr) {
      mem_p = std::make_shared<dnnl::memory>(md, engine_);
      dev_ctx_.SetBlob(local_key, mem_p);
    }
    return mem_p;
  }

  void AcquireReorder(const std::shared_ptr<dnnl::memory>& user_memory_p,
                      const std::shared_ptr<dnnl::memory>& target_memory_p) {
    auto reorder_p =
        std::make_shared<dnnl::reorder>(*user_memory_p, *target_memory_p);

    auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

    platform::RecordEvent record_reorder("int_reorder",
                                         platform::TracerEventType::UserDefined,
                                         2,
                                         platform::EventRole::kUniqueOp);
    reorder_p->execute(
        astream,
        {{DNNL_ARG_FROM, *user_memory_p}, {DNNL_ARG_TO, *target_memory_p}});
    astream.wait();
  }

  template <typename F = T>
  std::shared_ptr<dnnl::memory> AcquireMemoryWithReorder(
      const dnnl::memory::desc& user_md,
      const dnnl::memory::desc& target_md,
      void* ptr,
      const std::string& suffix,
      bool is_persistent = false,
      std::function<std::shared_ptr<F>(const F*)> custom_reorder_func = {},
      const std::vector<float>& scale_data = {1.0f},
      int mask = 0) {
    const auto target_key = key_ + suffix + "_target";
    const auto key_reorder_p = key_ + suffix + "reorder_p";
    const auto user_key = key_ + suffix + "_user";

    auto target_memory_p =
        std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(target_key));

    if (target_memory_p == nullptr) {
      if (custom_reorder_func) {
        auto reordered_data =
            custom_reorder_func(reinterpret_cast<const F*>(ptr));
        dev_ctx_.SetBlob(key_reorder_p + "-custom_reorder", reordered_data);
        ptr = reinterpret_cast<void*>(reordered_data.get());
      }
      auto user_memory_p =
          std::make_shared<dnnl::memory>(user_md, engine_, ptr);
      if (user_md != target_md) {
        target_memory_p = std::make_shared<dnnl::memory>(target_md, engine_);
        dnnl::reorder::primitive_desc reorder_pdesc;
        if (is_int8<T>()) {
          dnnl::primitive_attr attr;
          attr.set_output_scales(mask, scale_data);
          reorder_pdesc = dnnl::reorder::primitive_desc(
              *user_memory_p, *target_memory_p, attr);
        } else {
          reorder_pdesc =
              dnnl::reorder::primitive_desc(*user_memory_p, *target_memory_p);
        }
        auto reorder_p = std::make_shared<dnnl::reorder>(reorder_pdesc);
        dev_ctx_.SetBlob(key_reorder_p, reorder_p);

        auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
        platform::RecordEvent record_reorder(
            "int_reorder",
            platform::TracerEventType::UserDefined,
            2,
            platform::EventRole::kUniqueOp);
        reorder_p->execute(
            astream,
            {{DNNL_ARG_FROM, *user_memory_p}, {DNNL_ARG_TO, *target_memory_p}});
        astream.wait();
      } else {
        target_memory_p = user_memory_p;
      }
      dev_ctx_.SetBlob(user_key, user_memory_p);
      dev_ctx_.SetBlob(target_key, target_memory_p);
    } else if (!is_persistent) {
      auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();

      auto user_memory_p =
          std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(user_key));
      user_memory_p->set_data_handle(ptr);

      // TODO(jczaja): If a reorder was cached here, it means it is needed;
      // this needs to be changed to get rid of keys
      auto reorder_p = std::static_pointer_cast<dnnl::reorder>(
          dev_ctx_.GetBlob(key_reorder_p));
      if (reorder_p != nullptr) {
        platform::RecordEvent record_reorder(
            "int_reorder",
            platform::TracerEventType::UserDefined,
            2,
            platform::EventRole::kUniqueOp);
        reorder_p->execute(
            astream,
            {{DNNL_ARG_FROM, *user_memory_p}, {DNNL_ARG_TO, *target_memory_p}});
        astream.wait();
      }
    }
    return target_memory_p;
  }

  std::shared_ptr<dnnl::memory> AcquireMemory(const std::string& suffix) {
    const auto local_key = key_ + suffix;
    return std::static_pointer_cast<dnnl::memory>(dev_ctx_.GetBlob(local_key));
  }

  const MKLDNNDeviceContext& dev_ctx_;
  dnnl::engine engine_;
  platform::Place place_;
  std::string key_common_;
  std::string key_;
  std::shared_ptr<typename TForward::primitive_desc> fwd_pd_;
  std::shared_ptr<typename TBackward::primitive_desc> bwd_pd_;
  std::shared_ptr<typename TBackward_params::primitive_desc> bwd_w_pd_;
};

template <typename T>
class BinaryMKLDNNHandler
    : public platform::MKLDNNHandlerNoCachingT<T, dnnl::binary> {
 public:
  BinaryMKLDNNHandler(const dnnl::algorithm algo,
                      const int axis,
                      const dnnl::engine engine,
                      platform::Place cpu_place,
                      const Tensor* x,
                      const Tensor* y,
                      Tensor* out,
                      float scale_x,
                      float scale_y,
                      float scale_out,
                      const dnnl::post_ops& post_ops = dnnl::post_ops{})
      : platform::MKLDNNHandlerNoCachingT<T, dnnl::binary>(engine, cpu_place) {
    const auto src_x_tz = phi::vectorize(x->dims());
    const auto src_y_tz = phi::vectorize(y->dims());
    // If the output tensor (out) is nullptr then we are computing into a
    // oneDNN-managed buffer
    auto rankdiff = x->dims().size() - y->dims().size();
    const auto dst_tz = (out == nullptr) ? (rankdiff > 0 ? src_x_tz : src_y_tz)
                                         : phi::vectorize(out->dims());

    auto src0_md = x->mem_desc();
    auto src1_md = y->mem_desc();
    if (rankdiff > 0) {  // Second input is of smaller rank than first
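      // Illustrative example (assumed shapes): x dims [2, 3, 4, 5], y dims
      // [3, 4] and axis == 1 give rankdiff == 2 and dims1_ex == [1, 3, 4, 1],
      // so y is broadcast over the first and last axes of x.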
      std::vector<int64_t> dims1_ex(rankdiff, 1);
      dims1_ex.insert(next(dims1_ex.begin(), (axis == -1 ? rankdiff : axis)),
                      src_y_tz.begin(),
                      src_y_tz.end());
      // For NHWC broadcasting we need to rotate the extended shape
      if (MKLDNNDeviceContext::tls().get_cur_paddle_data_layout() ==
          framework::DataLayout::kNHWC) {
        std::rotate(dims1_ex.begin() + 1, dims1_ex.end() - 1, dims1_ex.end());
      }
      src1_md = src1_md.reshape(dims1_ex);
    } else if (rankdiff < 0) {  // First input has smaller rank than second
      std::vector<int64_t> dims0_ex(-rankdiff, 1);
      dims0_ex.insert(next(dims0_ex.begin(), (axis == -1 ? -rankdiff : axis)),
                      src_x_tz.begin(),
                      src_x_tz.end());
      // For NHWC broadcasting we need to rotate the extended shape
      if (MKLDNNDeviceContext::tls().get_cur_paddle_data_layout() ==
          framework::DataLayout::kNHWC) {
        std::rotate(dims0_ex.begin() + 1, dims0_ex.end() - 1, dims0_ex.end());
      }
      src0_md = src0_md.reshape(dims0_ex);
    }
    const auto dst_md = memory::desc(
        dst_tz, platform::MKLDNNGetDataType<T>(), MKLDNNMemoryFormat::any);

    auto attributes =
        CreateAttributes(algo, scale_x, scale_y, scale_out, post_ops);

    if (x->numel() < y->numel()) {
      this->AcquireForwardPrimitiveDescriptor(
          attributes, algo, src1_md, src0_md, dst_md);
    } else {
      this->AcquireForwardPrimitiveDescriptor(
          attributes, algo, src0_md, src1_md, dst_md);
    }
  }
  std::shared_ptr<dnnl::memory> AcquireSecondSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->src1_desc(),
                                            to_void_cast<T>(input_data));
  }

 private:
  static inline dnnl::primitive_attr CreateAttributes(
      dnnl::algorithm op,
      float scale_x,
      float scale_y,
      float scale_out,
      dnnl::post_ops post_ops = dnnl::post_ops{}) {
    // Scales set in attributes for inputs contribute to the output equation
    // in the following way (assuming no broadcasting takes place):
    // output_i = scale_0 * x_i <+ or *> scale_1 * y_i;
    // Hence we have to create scales that will:
    // 1. Dequantize both values, by multiplying with (1.0 / scale_x_or_y)
    // 2. Quantize their result to output scale range, by multiplying with
    // (scale_z)
    // If we combine these two, we end up with following equation
    // output = scale_out * (1/scale_x * x <* or +> 1/scale_y * y)
    // Hence, to mimic such behaviour using provided interface,
    // For add operation the equation is equal to:
    // output = (scale_out / scale_x) * x + (scale_out / scale_y) * y
    //                <scale_0>                  <scale_1>
    // For mul operation on the other hand
    // output = (scale_out / scale_x) * x * (1.0 / scale_y) * y
    //                <scale_0>                 <scale_1>
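    //
    // Worked example (illustrative numbers only): scale_x = 2.f,
    // scale_y = 4.f, scale_out = 8.f and binary_add give
    // scale_0 = 8 / 2 = 4.f and scale_1 = 8 / 4 = 2.f, i.e.
    // output = 4 * x + 2 * y is computed directly on the quantized inputs.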
    float scale_0 = scale_out / scale_x;
    float scale_1 =
        op == dnnl::algorithm::binary_add ? scale_out / scale_y : 1.0 / scale_y;
    dnnl::primitive_attr attributes;
    attributes.set_scales(
        /* input_x_id = */ DNNL_ARG_SRC_0, /* mask = */ 0, {scale_0});
    attributes.set_scales(
        /* input_y_id = */ DNNL_ARG_SRC_1, /* mask = */ 0, {scale_1});
    if (post_ops.len() > 0) attributes.set_post_ops(post_ops);
    return attributes;
  }
};

template <typename T>
class BroadcastDataMKLDNNHandler
    : public platform::MKLDNNHandlerNoCachingT<T, dnnl::binary> {
 public:
  BroadcastDataMKLDNNHandler(const dnnl::algorithm algo,
                             const dnnl::engine engine,
                             platform::Place cpu_place,
                             const Tensor* x,
                             Tensor* out,
                             float scale_x,
                             float scale_y,
                             const std::vector<int64_t>& extended_x_dims)
      : platform::MKLDNNHandlerNoCachingT<T, dnnl::binary>(engine, cpu_place) {
    const auto src0_tz = phi::vectorize(out->dims());
    const auto src0_md =
        dnnl::memory::desc(src0_tz,
                           platform::MKLDNNGetDataType<T>(),
                           platform::GetPlainMKLDNNFormat(src0_tz.size()));
    const auto src1_md = x->mem_desc().reshape(extended_x_dims);

    dnnl::primitive_attr attributes;
    attributes.set_scales(DNNL_ARG_SRC_0, 0, {scale_x});
    attributes.set_scales(DNNL_ARG_SRC_1, 0, {scale_y});

    this->AcquireForwardPrimitiveDescriptor(
        attributes, algo, src0_md, src1_md, src0_md);
  }

  template <typename T_out = T>
  std::shared_ptr<dnnl::memory> AcquireZeroedDstMemory(framework::Tensor* out) {
    T_out* ptr = out->mutable_data<T_out>(this->place_,
                                          this->fwd_pd_->dst_desc().get_size());
    memset(ptr, 0, this->fwd_pd_->dst_desc().get_size());
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->dst_desc(), ptr);
  }
};

static void AppendActivation(const framework::ExecutionContext& ctx,
                             dnnl::post_ops& post_ops,
                             float activation_scale = 1.0f) {
  const auto invalid_attribute =
      ctx.HasAttr("fuse_activation")
          ? ctx.Attr<std::string>("fuse_activation").empty()
          : true;
  if (invalid_attribute) return;

  const auto fuse_activation = ctx.Attr<std::string>("fuse_activation");
  const auto fuse_alpha =
      ctx.HasAttr("fuse_alpha") ? ctx.Attr<float>("fuse_alpha") : 0.0f;
  const auto fuse_beta =
      ctx.HasAttr("fuse_beta") ? ctx.Attr<float>("fuse_beta") : 0.0f;

  if (fuse_activation == "hard_sigmoid") {
    post_ops.append_eltwise(activation_scale,
                            dnnl::algorithm::eltwise_linear,
                            fuse_alpha,
                            fuse_beta);
    post_ops.append_eltwise(
        activation_scale, dnnl::algorithm::eltwise_clip, 0.0f, 1.0f);
  } else {
    const std::unordered_map<std::string, dnnl::algorithm> activation_map = {
        {"abs", dnnl::algorithm::eltwise_abs},
        {"clip", dnnl::algorithm::eltwise_clip},
        {"gelu", dnnl::algorithm::eltwise_gelu_erf},
        {"gelu_erf", dnnl::algorithm::eltwise_gelu_erf},
        {"gelu_tanh", dnnl::algorithm::eltwise_gelu_tanh},
        {"hard_swish", dnnl::algorithm::eltwise_hardswish},
        {"leaky_relu", dnnl::algorithm::eltwise_relu},
        {"mish", dnnl::algorithm::eltwise_mish},
        {"relu", dnnl::algorithm::eltwise_relu},
        {"relu6", dnnl::algorithm::eltwise_bounded_relu},
        {"sigmoid", dnnl::algorithm::eltwise_logistic},
        {"sqrt", dnnl::algorithm::eltwise_sqrt},
        {"swish", dnnl::algorithm::eltwise_swish},
        {"tanh", dnnl::algorithm::eltwise_tanh}};

    const auto& activation_type = activation_map.find(fuse_activation);

    PADDLE_ENFORCE_NE(
        activation_type,
        activation_map.end(),
        platform::errors::InvalidArgument(
            "Activation '%s' not found in oneDNN algorithms mapper",
            fuse_activation));

    post_ops.append_eltwise(
        activation_scale, activation_type->second, fuse_alpha, fuse_beta);
  }
}

template <typename T>
class ReductionMKLDNNHandler
    : public platform::MKLDNNHandlerNoCachingT<T, dnnl::reduction> {
 public:
  ReductionMKLDNNHandler(const dnnl::algorithm algo,
                         const float p,
                         const float eps,
                         const dnnl::engine engine,
                         platform::Place cpu_place,
                         const Tensor* x,
                         const Tensor* out,
                         std::vector<int64_t> out_tz,
                         const dnnl::primitive_attr& attrs = NULL)
      : platform::MKLDNNHandlerNoCachingT<T, dnnl::reduction>(engine,
                                                              cpu_place) {
    const auto out_md = memory::desc(out_tz,
                                     platform::MKLDNNGetDataType<T>(),
                                     dnnl::memory::format_tag::any);

    if (attrs)
      this->AcquireForwardPrimitiveDescriptor(
          attrs, algo, x->mem_desc(), out_md, p, eps);
    else
      this->AcquireForwardPrimitiveDescriptor(
          algo, x->mem_desc(), out_md, p, eps);
  }
};

template <typename T>
constexpr bool IsInt8() {
  return std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value;
}

template <typename T>
constexpr bool IsBfloat16() {
  return std::is_same<T, paddle::platform::bfloat16>::value;
}

template <typename XT, typename YT, typename OT>
class MatMulV2MKLDNNHandler
    : public paddle::platform::MKLDNNHandlerNoCachingT<XT, dnnl::matmul> {
 public:
  MatMulV2MKLDNNHandler(const framework::ExecutionContext& ctx,
                        const dnnl::engine engine,
                        paddle::platform::Place cpu_place,
                        const std::vector<int64_t>& x_org_dims,
                        bool trans_x,
                        const std::vector<int64_t>& y_org_dims,
                        bool trans_y,
                        bool is_output_fused,
                        const std::vector<int64_t>& x_strides_override,
                        const std::vector<int64_t>& y_strides_override)
      : paddle::platform::MKLDNNHandlerNoCachingT<XT, dnnl::matmul>(engine,
                                                                    cpu_place) {
    // M X K * K X N
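    // Illustrative example (assumed shapes): x_org_dims = {2, 3, 4},
    // y_org_dims = {2, 4, 5} and trans_x = trans_y = false give M = 3, K = 4,
    // N = 5, batch = 2, x_strides = {12, 4, 1}, y_strides = {20, 5, 1} and
    // out_strides = {15, 5, 1}.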
    std::vector<int64_t> x_dims(x_org_dims);
    std::vector<int64_t> y_dims(y_org_dims);

    const int MB_idx = x_dims.size() - 3;
    const int H_idx = x_dims.size() - 2;
    const int W_idx = x_dims.size() - 1;

    if (trans_x) std::swap(x_dims[H_idx], x_dims[W_idx]);
    if (trans_y) std::swap(y_dims[H_idx], y_dims[W_idx]);

    const memory::dim M = x_dims[H_idx];
    const memory::dim K = x_dims[W_idx];
    const memory::dim N = y_dims[W_idx];

    std::vector<int64_t> x_strides(x_dims.size() - 3, 1);
    std::vector<int64_t> y_strides(x_dims.size() - 3, 1);
    std::vector<int64_t> out_strides(x_dims.size() - 3, 1);
    std::vector<int64_t> out_ddims(x_dims.size() - 3, 1);

    x_strides.reserve(x_dims.size());
    y_strides.reserve(x_dims.size());
    out_strides.reserve(x_dims.size());

    if (!x_strides_override.empty()) {
      x_strides = x_strides_override;
    } else {
      if (!trans_x) {
        x_strides.insert(x_strides.end(), {M * K, K, 1});
      } else {
        x_strides.insert(x_strides.end(), {M * K, 1, M});
      }
    }

    if (!y_strides_override.empty()) {
      y_strides = y_strides_override;
    } else {
      if (!trans_y) {
        y_strides.insert(y_strides.end(), {N * K, N, 1});
      } else {
        y_strides.insert(y_strides.end(), {N * K, 1, K});
      }
    }

    out_strides.insert(out_strides.end(), {M * N, N, 1});
    out_ddims.insert(out_ddims.end(),
                     {std::max(x_dims[MB_idx], y_dims[MB_idx]), M, N});

    for (int i = x_dims.size() - 4; i >= 0; --i) {
      out_ddims[i] = std::max(x_dims[i], y_dims[i]);
      if (x_strides_override.empty()) {
        x_strides[i] = x_dims[i + 1] * x_strides[i + 1];
      }
      if (y_strides_override.empty()) {
        y_strides[i] = y_dims[i + 1] * y_strides[i + 1];
      }
      out_strides[i] = out_ddims[i + 1] * out_strides[i + 1];
    }

    if (!IsInt8<OT>() && !IsBfloat16<OT>() && is_output_fused) {
      out_strides = FakeTransposeStrides(out_ddims);
    }

    auto x_md = memory::desc(x_dims, MKLDNNGetDataType<XT>(), x_strides);
    auto y_md = memory::desc(y_dims, MKLDNNGetDataType<YT>(), y_strides);
    auto out_md = memory::desc(out_ddims, MKLDNNGetDataType<OT>(), out_strides);

    const dnnl::primitive_attr matmul_attrs = CreateMatmulAttrs(ctx);

    this->AcquireForwardPrimitiveDescriptor(matmul_attrs, x_md, y_md, out_md);
  }

  float ComputeOutputScale(const framework::ExecutionContext& ctx) {
    float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 1.0f;
    if (ctx.HasAttr("Scale_x") && ctx.HasAttr("Scale_y") &&
        ctx.HasAttr("Scale_out")) {
      float scale_x = ctx.Attr<float>("Scale_x");
      float scale_y = ctx.Attr<float>("Scale_y");
      bool force_fp32_out = ctx.HasAttr("force_fp32_output")
                                ? ctx.Attr<bool>("force_fp32_output")
                                : false;
      float scale_out = force_fp32_out ? 1.f : ctx.Attr<float>("Scale_out");
      alpha *= scale_out / (scale_x * scale_y);
    }
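    // e.g. (illustrative) alpha = 1.f with Scale_x = Scale_y = Scale_out =
    // 127.f gives alpha = 127 / (127 * 127) = 1 / 127, which is later applied
    // through the oneDNN output scales.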
    return alpha;
  }

  dnnl::primitive_attr CreateMatmulAttrs(
      const framework::ExecutionContext& ctx) {
    dnnl::primitive_attr matmul_attrs;
    dnnl::post_ops post_operations;

    float scale_out = ComputeOutputScale(ctx);
    if (scale_out != 1.0f) {
      matmul_attrs.set_output_scales(0, {scale_out});
    }

    if (ctx.HasInput("ResidualData")) {
      auto* residual_data = ctx.Input<Tensor>("ResidualData");
      auto residual_data_tz = phi::vectorize(residual_data->dims());
      auto residual_data_md = memory::desc(residual_data_tz,
                                           dnnl::memory::data_type::f32,
                                           dnnl::memory::format_tag::abcd);
      post_operations.append_binary(dnnl::algorithm::binary_add,
                                    residual_data_md);
    }

    AppendActivation(ctx, post_operations);

    matmul_attrs.set_post_ops(post_operations);
    return matmul_attrs;
  }

  std::vector<int64_t> FakeTransposeStrides(
      const std::vector<int64_t>& matmul_out_dims) const {
    // fuse matmul_v2 + transpose + reshape guarantees that output is 4D and
    // transpose axes are: {0, 2, 1, 3}
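    // e.g. (illustrative) matmul_out_dims = {2, 3, 4, 5} yields
    // fake_strides = {60, 5, 15, 1}, i.e. memory laid out as the transposed
    // {2, 4, 3, 5} tensor.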
    std::vector<int64_t> transpose_axis = {0, 2, 1, 3};
    std::vector<int64_t> fake_strides(transpose_axis.size());
    int ndims = static_cast<int>(transpose_axis.size());

    int total_stride = 1;

    for (int i = ndims - 1; i >= 0; --i) {
      fake_strides[transpose_axis[i]] = total_stride;
      total_stride *= matmul_out_dims[transpose_axis[i]];
    }

    return fake_strides;
  }

  std::shared_ptr<memory> AcquireWeightsMemory(const Tensor* input) {
    const YT* input_data = input->data<YT>();
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->weights_desc(),
                                            to_void_cast<YT>(input_data));
  }

  std::shared_ptr<dnnl::memory> AcquireDstMemory(
      paddle::framework::Tensor* output) {
    // We cannot use the base AcquireDstMemory, as it makes an allocation
    // request based on the DST memory primitive size. This is fine in general,
    // but in MatMul we have a primitive that covers only one batch of data and
    // then shifts the pointer for every new batch. Hence the Tensor size is
    // bigger than the dst memory primitive size, so we would request less
    // memory than is actually there and trigger an assertion. As there is no
    // 'any' format here, we can keep the default Tensor size as computed in
    // ComputeInferShape.
    OT* ptr = output->mutable_data<OT>(this->place_);
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->dst_desc(), ptr);
  }
};

template <typename T>
class ActivationMKLDNNHandler
    : public MKLDNNHandlerNoCachingT<T,
                                     dnnl::eltwise_forward,
                                     dnnl::eltwise_backward> {
 public:
  ActivationMKLDNNHandler(dnnl::algorithm algorithm,
                          const framework::ExecutionContext& ctx,
                          const dnnl::engine engine,
                          Place cpu_place,
                          const framework::Tensor* x)
      : platform::MKLDNNHandlerNoCachingT<T,
                                          dnnl::eltwise_forward,
                                          dnnl::eltwise_backward>(engine,
                                                                  cpu_place) {
    float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 0;
    float beta = ctx.HasAttr("beta") ? ctx.Attr<float>("beta") : 0;

    if (ctx.Type() == "scale") {
      bool bias_after_scale = ctx.Attr<bool>("bias_after_scale");
      auto* scale_tensor = ctx.Input<Tensor>("ScaleTensor");
      alpha = (scale_tensor == nullptr)
                  ? ctx.Attr<float>("scale")
                  : static_cast<float>(*(scale_tensor->data<T>()));
      beta = ctx.Attr<float>("bias");
      // if bias_after_scale == true
      //   out = scale*X + bias
      // else
      //   out = scale*(X + bias) = scale*X + scale*bias
      if (!bias_after_scale) {
        beta *= alpha;
      }
    } else if (ctx.Type() == "clip") {
      alpha = ctx.HasInput("Min") ? ctx.Input<Tensor>("Min")->data<float>()[0]
                                  : ctx.Attr<float>("min");
      beta = ctx.HasInput("Max") ? ctx.Input<Tensor>("Max")->data<float>()[0]
                                 : ctx.Attr<float>("max");
    } else {
      // paddle uses beta but mkldnn uses alpha for swish
      if (algorithm == dnnl::algorithm::eltwise_swish) {
        std::swap(alpha, beta);
      } else if (algorithm == dnnl::algorithm::eltwise_bounded_relu) {
        alpha = ctx.Attr<float>("threshold");
      }
    }

    this->AcquireForwardPrimitiveDescriptor(dnnl::prop_kind::forward_training,
                                            algorithm,
                                            x->mem_desc(),
                                            alpha,
                                            beta);
  }

  ActivationMKLDNNHandler(dnnl::algorithm algorithm,
                          const framework::ExecutionContext& ctx,
                          const dnnl::engine engine,
                          Place cpu_place,
                          const framework::Tensor* x,
                          const Tensor* dout)
      : platform::MKLDNNHandlerNoCachingT<T,
                                          dnnl::eltwise_forward,
                                          dnnl::eltwise_backward>(engine,
                                                                  cpu_place) {
    float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 0;
    float beta = ctx.HasAttr("beta") ? ctx.Attr<float>("beta") : 0;

    // paddle uses beta but mkldnn uses alpha for swish
    if (algorithm == dnnl::algorithm::eltwise_swish) {
      std::swap(alpha, beta);
    } else if (algorithm == dnnl::algorithm::eltwise_bounded_relu) {
      alpha = ctx.Attr<float>("threshold");
    }

    if (ctx.Type() == "clip_grad") {
      alpha = ctx.HasInput("Min") ? ctx.Input<Tensor>("Min")->data<float>()[0]
                                  : ctx.Attr<float>("min");
      beta = ctx.HasInput("Max") ? ctx.Input<Tensor>("Max")->data<float>()[0]
                                 : ctx.Attr<float>("max");
    }

    this->AcquireForwardPrimitiveDescriptor(dnnl::prop_kind::forward_training,
                                            algorithm,
                                            x->mem_desc(),
                                            alpha,
                                            beta);
    this->AcquireBackwardPrimitiveDescriptor(
        algorithm, dout->mem_desc(), x->mem_desc(), alpha, beta);
  }

  std::shared_ptr<dnnl::memory> AcquireBackwardSrcMemory(
      const framework::Tensor* input) {
    const T* input_data = input->data<T>();
    return this->AcquireMemoryFromPrimitive(this->bwd_pd_->src_desc(),
                                            to_void_cast<T>(input_data));
  }
};

static std::unordered_map<std::string, std::string> GetAttributeMap(
    std::string act_type) {
  std::unordered_map<std::string, std::string> attr_map;
  if (act_type == "swish") {
    attr_map.emplace("beta", "fuse_alpha");
  } else if (act_type == "relu6") {
    attr_map.emplace("threshold", "fuse_alpha");
  } else if (act_type == "hard_sigmoid") {
    attr_map.emplace("slope", "fuse_alpha");
    attr_map.emplace("offset", "fuse_beta");
  } else if (act_type == "clip") {
    attr_map.emplace("min", "fuse_alpha");
    attr_map.emplace("max", "fuse_beta");
  } else {
    attr_map.emplace("alpha", "fuse_alpha");
    attr_map.emplace("beta", "fuse_beta");
  }
  return attr_map;
}

static std::vector<std::string> GetSupportedActivations() {
  return std::vector<std::string>{"abs",
                                  "clip",
                                  "gelu",
                                  "hard_sigmoid",
                                  "hard_swish",
                                  "leaky_relu",
                                  "mish",
                                  "relu",
                                  "relu6",
                                  "sigmoid",
                                  "sqrt",
                                  "swish",
                                  "tanh"};
}

class ReorderMKLDNNHandler {
 public:
  ReorderMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                       framework::proto::VarType::Type vtype,
                       dnnl::memory::data_type dtype,
                       dnnl::engine engine)
      : dims_(dims),
        vtype_(vtype),
        vtype_dst_(vtype),
        dtype_(dtype),
        dtype_dst_(dtype),
        engine_(engine) {}

  ReorderMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                       framework::proto::VarType::Type vtype,
                       dnnl::memory::data_type dtype,
                       framework::proto::VarType::Type vtype_dst,
                       dnnl::memory::data_type dtype_dst,
                       dnnl::engine engine)
      : dims_(dims),
        vtype_(vtype),
        vtype_dst_(vtype_dst),
        dtype_(dtype),
        dtype_dst_(dtype_dst),
        engine_(engine) {}
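
  // Typical usage sketch (illustrative only; variable names are placeholders,
  // not taken from any particular operator):
  //   ReorderMKLDNNHandler handler(dims, vtype, dtype, engine);
  //   auto src_mem = handler.AcquireSrcMemory(in->mem_desc(), in_ptr);
  //   auto dst_mem = handler.AcquireDstMemory(out, out_fmt, place);
  //   auto reorder_p = handler.AcquireReorder(dst_mem, src_mem);
  //   reorder_p->execute(astream, *src_mem, *dst_mem);
  //   astream.wait();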

  std::shared_ptr<dnnl::memory> AcquireSrcMemory(const dnnl::memory::desc& md,
                                                 void* ptr) {
    return std::make_shared<dnnl::memory>(md, engine_, ptr);
  }

  std::shared_ptr<dnnl::memory> AcquireSrcMemory(const MKLDNNMemoryFormat& fmt,
                                                 void* ptr) {
    auto md = dnnl::memory::desc(dims_, dtype_, fmt);
    return std::make_shared<dnnl::memory>(md, engine_, ptr);
  }

  std::shared_ptr<dnnl::memory> AcquireSubmemory(
      const std::vector<int64_t>& dims,
      const std::vector<int64_t>& offset,
      const std::shared_ptr<dnnl::memory>& mem_p) {
    auto sub_md = mem_p->get_desc().submemory_desc(dims, {offset});
    auto sub_mem_p = std::make_shared<dnnl::memory>(
        sub_md, engine_, mem_p->get_data_handle());
    return sub_mem_p;
  }

  std::shared_ptr<dnnl::memory> AcquireDstMemory(framework::Tensor* output,
                                                 const MKLDNNMemoryFormat& fmt,
                                                 platform::Place place) {
    auto dst_md = platform::MKLDNNMemDesc(dims_, dtype_dst_, fmt);
    auto dst_data = output->mutable_data(
        place, framework::TransToPhiDataType(vtype_dst_), dst_md.get_size());
    return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
  }

  std::shared_ptr<dnnl::memory> AcquireDstMemory(
      framework::Tensor* output,
      const dnnl::memory::desc& src_md,
      platform::Place place) {
    if (vtype_dst_ == vtype_) {
      auto dst_data = output->mutable_data(
          place, framework::TransToPhiDataType(vtype_dst_), src_md.get_size());
      return std::make_shared<dnnl::memory>(src_md, engine_, dst_data);
    } else {
      auto dst_md = src_md;
      dst_md.data.data_type = static_cast<dnnl_data_type_t>(dtype_dst_);
      auto dst_data = output->mutable_data(
          place, framework::TransToPhiDataType(vtype_dst_), dst_md.get_size());
      return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
    }
  }

  std::shared_ptr<dnnl::memory> AcquireDstMemory(
      framework::Tensor* output,
      const std::vector<int64_t>& dims,
      const MKLDNNMemoryFormat& fmt,
      platform::Place place) {
    auto dst_md = platform::MKLDNNMemDesc(dims, dtype_dst_, fmt);
    auto dst_data = output->mutable_data(
        place, framework::TransToPhiDataType(vtype_dst_), dst_md.get_size());
    return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
  }

  std::shared_ptr<dnnl::reorder> AcquireReorder(
      std::shared_ptr<dnnl::memory> dst_memory_p,
      std::shared_ptr<dnnl::memory> src_memory_p) {
    return std::make_shared<dnnl::reorder>(*(src_memory_p), *(dst_memory_p));
  }

  std::shared_ptr<dnnl::reorder> AcquireReorder(
      std::shared_ptr<dnnl::memory> dst_memory_p,
      std::shared_ptr<dnnl::memory> src_memory_p,
      const dnnl::primitive_attr& attrs) {
    return std::make_shared<dnnl::reorder>(
        *(src_memory_p), *(dst_memory_p), attrs);
  }

 private:
  std::vector<int64_t> dims_;
  framework::proto::VarType::Type vtype_, vtype_dst_;
  dnnl::memory::data_type dtype_, dtype_dst_;
  dnnl::engine engine_;
};

template <typename T>
static void SetDstMemoryQuantized(
    const framework::ExecutionContext& ctx,
    framework::Tensor* output,
    std::vector<int64_t> dst_tz,
    const dnnl::engine& engine,
    std::shared_ptr<dnnl::memory::desc>& dst_md,  // NOLINT
    std::shared_ptr<dnnl::memory>& dst_memory,    // NOLINT
    MKLDNNMemoryFormat output_format) {
  T* output_data = output->mutable_data<T>(ctx.GetPlace());
  const size_t dst_dims = dst_tz.size();
  MKLDNNMemoryFormat dst_fmt;

  PADDLE_ENFORCE_LE(dst_dims,
                    5,
                    platform::errors::InvalidArgument(
                        "Dst memory for quantization can not have "
                        "dims > 5. But received dst_dims is %d.",
                        dst_dims));
  dst_fmt = platform::MKLDNNFormatForSize(dst_dims, output_format);

  auto tmp_dst_md =
      platform::MKLDNNMemDesc({dst_tz},
                              paddle::framework::ToMKLDNNDataType(
                                  framework::DataTypeTrait<T>::DataType()),
                              dst_fmt);
  dst_md.reset(new dnnl::memory::desc(tmp_dst_md));
  dst_memory.reset(
      new dnnl::memory(*dst_md, engine, to_void_cast<T>(output_data)));
}

}  // namespace platform
}  // namespace paddle