/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <algorithm>
#include <cstdint>  // uintptr_t
#include <iostream>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "dnnl.hpp"  // NOLINT
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/backends/onednn/onednn_helper.h"
namespace paddle {
#ifdef PADDLE_WITH_MKLDNN
using OneDNNMemoryFormat = dnnl::memory::format_tag;
using phi::OneDNNContext;
#endif
namespace platform {

inline void ClearMKLDNNCache(const platform::Place& place,
                             void* ptr = nullptr) {
  // Clear the mkl-dnn cache.
  if (platform::is_cpu_place(place)) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    OneDNNContext* dev_ctx = reinterpret_cast<OneDNNContext*>(pool.Get(place));
    dev_ctx->ResetBlobMap(ptr);
  }
}
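
// Usage sketch (call site hypothetical): clear the whole CPU-side
// cache via the default null pointer; forwarding a non-null `ptr`
// scopes the reset to entries tied to that executor, per
// ResetBlobMap's argument.
//
//   platform::ClearMKLDNNCache(platform::CPUPlace());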

inline void DontClearMKLDNNCache(const platform::Place& place) {
  // Block the next clearing of the mkl-dnn cache.
  if (platform::is_cpu_place(place)) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    OneDNNContext* dev_ctx = reinterpret_cast<OneDNNContext*>(pool.Get(place));
    dev_ctx->BlockNextCacheClearing();
  }
}
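
// Usage sketch (call site hypothetical): invoked before a run whose
// cached primitives should survive the next ClearMKLDNNCache call,
// e.g. a warm-up pass:
//
//   platform::DontClearMKLDNNCache(platform::CPUPlace());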

// In an MKLDNN build on a CPU place, register an executor-specific key
// suffix in the DeviceContext
inline void AttachPointerHashToMKLDNNKey(void* ptr,
                                         const platform::Place& place) {
  if (platform::is_cpu_place(place)) {
    // Static vars will remember the first executor and its thread,
    // so both of them need to be processed by the same thread within
    // a critical section
    static std::mutex static_vars_barrier;
    static_vars_barrier.lock();
    static auto first_exec = ptr;
    static auto first_thread = phi::funcs::ThreadIDasStr();
    static_vars_barrier.unlock();

    if (first_exec != ptr) {
      OneDNNContext::tls().set_key_suffix(
          "E" + std::to_string(reinterpret_cast<uintptr_t>(ptr)));
    }
    // Let's register the address of the current executor
    OneDNNContext::tls().set_curr_exec(ptr);

    // For the first thread, the thread id can be dropped from the key
    if (first_thread == phi::funcs::ThreadIDasStr()) {
      OneDNNContext::tls().disable_tid_in_key();
    }
  }
}
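
// Suffix sketch (executor pointers exec_a/exec_b are hypothetical):
// the first executor observed keeps unsuffixed cache keys, while any
// later executor is keyed with "E" + its decimal address so the two
// do not share cached blobs:
//
//   AttachPointerHashToMKLDNNKey(exec_a, place);  // first: no suffix
//   AttachPointerHashToMKLDNNKey(exec_b, place);  // keys get "E9398..."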

inline void RegisterModelLayout(
    std::vector<std::unique_ptr<framework::OperatorBase>>& ops,  // NOLINT
    const platform::Place& place) {
  if (platform::is_cpu_place(place)) {
    // If NHWC is already registered then quit this call,
    // so as not to overwrite the setting with analysis of an internal
    // "while" op block
    if (OneDNNContext::tls().get_cur_paddle_data_layout() ==
        phi::DataLayout::kNHWC)
      return;

    VLOG(4) << "RegisterModelLayout for mkldnn";
    auto check_attrib = [](std::unique_ptr<framework::OperatorBase>& op,
                           const std::string& attrib_name) -> bool {
      if (op->HasAttr(attrib_name)) {
        auto data_format = op->Attr<std::string>(attrib_name);
        OneDNNContext::tls().set_cur_paddle_data_layout(
            data_format.compare("NHWC") == 0 ? phi::DataLayout::kNHWC
                                             : phi::DataLayout::kNCHW);
        return true;
      } else {
        return false;
      }
    };

    for (auto& op : ops) {
      if (check_attrib(op, std::string("data_format"))) {
        return;
      }
      if (check_attrib(op, std::string("data_layout"))) {
        return;
      }
    }
  }
}
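
// Usage sketch (the `block_ops` vector is hypothetical): scan a
// block's operators once and latch kNHWC or kNCHW from the first op
// carrying a data_format or data_layout attribute:
//
//   RegisterModelLayout(block_ops, platform::CPUPlace());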

inline bool HasOpINT8DataType(const paddle::framework::OpDesc* op) {
  return (op->GetAttrIfExists<std::string>("mkldnn_data_type") == "int8" ||
          op->GetAttrIfExists<bool>("use_quantizer"));
}

inline bool HasOpBFLOAT16DataType(const paddle::framework::OpDesc* op) {
  return op->GetAttrIfExists<std::string>("mkldnn_data_type") == "bfloat16";
}
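
// Both predicates read attributes straight off the OpDesc: an op whose
// mkldnn_data_type attribute is "bfloat16" satisfies
// HasOpBFLOAT16DataType, and one with use_quantizer set to true
// satisfies HasOpINT8DataType even without an explicit "int8" marker.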

}  // namespace platform

inline std::string FindInputNameByVarName(framework::OpDesc* op,
                                          const std::string& searched_name) {
  std::string ret;
  for (const auto& name : op->InputNames())
    for (const auto& input_name : op->Input(name))
      if (input_name == searched_name) ret = name;
  return ret;
}

inline std::string FindOutputNameByVarName(framework::OpDesc* op,
                                           const std::string& searched_name) {
  std::string ret;
  for (const auto& name : op->OutputNames())
    for (const auto& output_name : op->Output(name))
      if (output_name == searched_name) ret = name;
  return ret;
}
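
// Example (variable and slot names hypothetical): for an op whose
// Input("X") list contains "conv_out", FindInputNameByVarName(op,
// "conv_out") returns "X"; an empty string means no input slot
// references the searched variable. FindOutputNameByVarName mirrors
// this for outputs.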
}  // namespace paddle