/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <algorithm>
#include <iostream>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "dnnl.hpp"  // NOLINT
#include "paddle/fluid/framework/operator.h"
#include "paddle/phi/backends/onednn/onednn_helper.h"
#include "paddle/phi/common/place.h"
namespace paddle {
#ifdef PADDLE_WITH_MKLDNN
using phi::OneDNNContext;
#endif
namespace platform {

// Drop the oneDNN blob cache kept by the CPU device context. `ptr` is
// forwarded to ResetBlobMap (presumably it scopes the reset to one
// executor's entries, with nullptr wiping everything — confirm against
// OneDNNContext). No-op for non-CPU places.
inline void ClearMKLDNNCache(const platform::Place& place,
                             void* ptr = nullptr) {
  if (!platform::is_cpu_place(place)) {
    return;
  }
  auto& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = reinterpret_cast<OneDNNContext*>(pool.Get(place));
  dev_ctx->ResetBlobMap(ptr);
}

44 45 46 47
// Ask the CPU device context to skip its next scheduled oneDNN cache
// clearing (note: despite the similar name, this is the opposite of
// ClearMKLDNNCache). No-op for non-CPU places.
inline void DontClearMKLDNNCache(const platform::Place& place) {
  if (!platform::is_cpu_place(place)) {
    return;
  }
  auto& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = reinterpret_cast<OneDNNContext*>(pool.Get(place));
  dev_ctx->BlockNextCacheClearing();
}

53 54 55 56
// If MKLDNN build and CPU place then register suffix in DeviceContext
// If MKLDNN build and CPU place then register suffix in DeviceContext
inline void AttachPointerHashToMKLDNNKey(void* ptr,
                                         const platform::Place& place) {
  if (platform::is_cpu_place(place)) {
    // Static vars will remember the first executor and its thread, so both
    // of them need to be initialized by the same thread within a critical
    // section. std::unique_lock releases the mutex even if an initializer
    // throws; the manual lock()/unlock() pair it replaces would have left
    // the mutex locked forever in that case.
    static std::mutex static_vars_barrier;
    std::unique_lock<std::mutex> vars_lock(static_vars_barrier);
    static auto first_exec = ptr;
    static auto first_thread = phi::funcs::ThreadIDasStr();
    vars_lock.unlock();

    // Executors other than the first get a pointer-derived key suffix,
    // presumably so their cache entries stay distinct per executor.
    if (first_exec != ptr) {
      OneDNNContext::tls().set_key_suffix(
          "E" + std::to_string(reinterpret_cast<uintptr_t>(ptr)));
    }
    // Let's register address of current executor
    OneDNNContext::tls().set_curr_exec(ptr);

    // For first thread the thread id is dropped from cache keys.
    if (first_thread == phi::funcs::ThreadIDasStr()) {
      OneDNNContext::tls().disable_tid_in_key();
    }
  }
}

inline void RegisterModelLayout(
81
    std::vector<std::unique_ptr<framework::OperatorBase>>& ops,  // NOLINT
J
Jacek Czaja 已提交
82 83
    const platform::Place& place) {
  if (platform::is_cpu_place(place)) {
84 85
    // If there is already registered NHWC then quit this call
    // not to overwrite setting with analysis of internal "while" op block
86
    if (OneDNNContext::tls().get_cur_paddle_data_layout() ==
87
        phi::DataLayout::kNHWC)
88 89
      return;

L
Leo Chen 已提交
90
    VLOG(4) << "RegisterModelLayout for mkldnn";
J
Jacek Czaja 已提交
91 92 93 94
    auto check_attrib = [](std::unique_ptr<framework::OperatorBase>& op,
                           const std::string& attrib_name) -> bool {
      if (op->HasAttr(attrib_name)) {
        auto data_format = op->Attr<std::string>(attrib_name);
95
        OneDNNContext::tls().set_cur_paddle_data_layout(
96 97
            data_format.compare("NHWC") == 0 ? phi::DataLayout::kNHWC
                                             : phi::DataLayout::kNCHW);
J
Jacek Czaja 已提交
98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114
        return true;
      } else {
        return false;
      }
    };

    for (auto& op : ops) {
      if (check_attrib(op, std::string("data_format"))) {
        return;
      }
      if (check_attrib(op, std::string("data_layout"))) {
        return;
      }
    }
  }
}

// An op is treated as INT8 when its "mkldnn_data_type" attribute equals
// "int8" or its "use_quantizer" flag is set.
inline bool HasOpINT8DataType(const paddle::framework::OpDesc* op) {
  if (op->GetAttrIfExists<std::string>("mkldnn_data_type") == "int8") {
    return true;
  }
  return op->GetAttrIfExists<bool>("use_quantizer");
}

// True when the op's "mkldnn_data_type" attribute requests bfloat16.
inline bool HasOpBFLOAT16DataType(const paddle::framework::OpDesc* op) {
  const auto dtype = op->GetAttrIfExists<std::string>("mkldnn_data_type");
  return dtype == "bfloat16";
}

}  // namespace platform

// Returns the input-slot name under which `searched_name` appears in `op`,
// or an empty string when no slot references that variable. When several
// slots reference it, the last one in InputNames() order wins (the scan
// deliberately does not return early, matching the original semantics).
inline std::string FindInputNameByVarName(framework::OpDesc* op,
                                          const std::string& searched_name) {
  std::string found_slot;
  for (const auto& slot : op->InputNames()) {
    const auto& vars = op->Input(slot);
    if (std::find(vars.begin(), vars.end(), searched_name) != vars.end()) {
      found_slot = slot;
    }
  }
  return found_slot;
}

// Returns the output-slot name under which `searched_name` appears in `op`,
// or an empty string when no slot references that variable. When several
// slots reference it, the last one in OutputNames() order wins (the scan
// deliberately does not return early, matching the original semantics).
inline std::string FindOutputNameByVarName(framework::OpDesc* op,
                                           const std::string& searched_name) {
  std::string found_slot;
  for (const auto& slot : op->OutputNames()) {
    const auto& vars = op->Output(slot);
    if (std::find(vars.begin(), vars.end(), searched_name) != vars.end()) {
      found_slot = slot;
    }
  }
  return found_slot;
}
}  // namespace paddle