/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <algorithm>
#include <cctype>
#include <functional>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include "mkldnn.hpp"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/place.h"
namespace paddle {
#ifdef PADDLE_WITH_MKLDNN
using MKLDNNMemoryFormat = mkldnn::memory::format_tag;
#endif
namespace platform {

using MKLDNNStream = mkldnn::stream;
using MKLDNNEngine = mkldnn::engine;
using MKLDNNMemory = mkldnn::memory;
using MKLDNNMemoryDescriptor = mkldnn::memory::desc;
using MKLDNNPrimitive = mkldnn::primitive;
using MKLDNNPrimitiveDesc = mkldnn::handle<mkldnn_primitive_desc_t>;

typedef std::unique_ptr<MKLDNNStream> MKLDNNStreamPtr;
typedef std::unique_ptr<MKLDNNEngine> MKLDNNEnginePtr;
typedef std::unique_ptr<MKLDNNMemory> MKLDNNMemoryPtr;
typedef std::unique_ptr<MKLDNNPrimitive> MKLDNNPrimitivePtr;
typedef std::unique_ptr<MKLDNNPrimitiveDesc> MKLDNNPrimitiveDescPtr;

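// Strip constness and static type so a typed buffer pointer can be handed
// to MKL-DNN memory objects, which traffic in void*.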
template <typename Type>
void* to_void_cast(const Type* t) {
  return static_cast<void*>(const_cast<Type*>(t));
}

template <typename Type>
void* to_void_reinterpret_cast(const Type* t) {
  return reinterpret_cast<void*>(const_cast<Type*>(t));
}

template <class Type>
using tf_desc = typename Type::desc;

template <class Type>
using tf_pd = typename Type::primitive_desc;

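// Build a forward primitive descriptor for primitive `Type` by forwarding
// `args` into Type::desc with prop_kind::forward; the backward variant
// below does the same for Type's backward descriptor, using the matching
// forward primitive_desc `p` as a hint. A minimal usage sketch, assuming a
// 1.x-style softmax primitive and an existing `engine` and memory
// descriptor `data_md`:
//   auto fwd_pd = MKLDNNFwdPrimitiveDesc<mkldnn::softmax_forward>(
//       engine, data_md, 1 /* softmax axis */);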
template <typename Type, typename Engine, typename... Args>
std::shared_ptr<tf_pd<Type>> MKLDNNFwdPrimitiveDesc(const Engine& e,
                                                    Args&&... args) {
  auto desc =
      tf_desc<Type>(mkldnn::prop_kind::forward, std::forward<Args>(args)...);
  return std::make_shared<tf_pd<Type>>(desc, e);
}

template <typename Type, typename Engine, typename Primitive, typename... Args>
tf_pd<Type> MKLDNNBwdPrimitiveDesc(const Engine& e, const Primitive& p,
                                   Args&&... args) {
  auto desc = tf_desc<Type>(std::forward<Args>(args)...);
  return tf_pd<Type>(desc, e, p);
}

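// Rotate the shape of `tensor_in` between the dimension order kept by
// kMKLDNN (NCHW-like) and kNHWC. As an illustration, a shape of
// {2, 3, 4, 5} in kMKLDNN order becomes {2, 4, 5, 3} when matched to
// kNHWC, and vice versa.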
inline void MatchShapeToLayout(framework::Tensor* tensor_in,
                               framework::DataLayout from,
                               framework::DataLayout to) {
  // In these data layouts the channel dimension is either in the second
  // position (nChw) or in the last position (nhwC), so for dims == 2 the
  // two layouts coincide and nothing needs to be done. The same holds for
  // dims == 1, where only one layout is possible.
  if (tensor_in->dims().size() < 3) {
    return;
  }

  switch (from) {
    case framework::DataLayout::kMKLDNN:
      if (to == framework::DataLayout::kNHWC) {
        auto dims = framework::vectorize<int>(tensor_in->dims());
        std::rotate(dims.begin() + 1, dims.begin() + 2, dims.end());
        tensor_in->Resize(framework::make_ddim(dims));
      }
      break;
    case framework::DataLayout::kNHWC:
      if (to == framework::DataLayout::kMKLDNN) {
        auto dims = framework::vectorize<int>(tensor_in->dims());
        std::rotate(dims.begin() + 1, dims.end() - 1, dims.end());
        tensor_in->Resize(framework::make_ddim(dims));
      }
      break;
    default:
      break;
  }
}

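// Build an mkldnn::memory::desc from tensor dims, an MKL-DNN data type and
// a format tag.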
inline mkldnn::memory::desc MKLDNNMemDesc(const std::vector<int64_t>& dims,
                                          mkldnn::memory::data_type data_type,
                                          MKLDNNMemoryFormat format) {
  return mkldnn::memory::desc({dims}, data_type, format);
}

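// MKL-DNN kernels may be used only when the operator sets the use_mkldnn
// attribute and is placed on a CPU.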
inline bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx) {
  bool use_mkldnn = ctx.Attr<bool>("use_mkldnn");
  return use_mkldnn && platform::is_cpu_place(ctx.GetPlace());
}

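// Map a C++ element type to the corresponding MKL-DNN data type at compile
// time; types without a specialization resolve to undef.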
template <typename Type>
mkldnn::memory::data_type MKLDNNGetDataType() {
  return mkldnn::memory::data_type::undef;
}

template <>
inline mkldnn::memory::data_type MKLDNNGetDataType<float>() {
  return mkldnn::memory::data_type::f32;
}
template <>
inline mkldnn::memory::data_type MKLDNNGetDataType<int32_t>() {
  return mkldnn::memory::data_type::s32;
}
template <>
inline mkldnn::memory::data_type MKLDNNGetDataType<int8_t>() {
  return mkldnn::memory::data_type::s8;
}
template <>
inline mkldnn::memory::data_type MKLDNNGetDataType<uint8_t>() {
  return mkldnn::memory::data_type::u8;
}

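// Copy (and, if the descriptors differ, convert) `src` into `dst` by
// executing a reorder primitive synchronously on a stream created from
// `engine`.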
inline void Reorder(mkldnn::memory src, mkldnn::memory dst,
                    const mkldnn::engine& engine) {
  auto reorder_prim = mkldnn::reorder(src, dst);
  mkldnn::stream astream(engine);
  reorder_prim.execute(astream, src, dst);
  astream.wait();
}

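// Reconstruct the format_tag of a memory descriptor by pattern-matching
// its rank, strides and inner-block layout. Only the tags used by the
// MKL-DNN kernels in this codebase are recognized; any other layout yields
// format_tag::undef.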
inline mkldnn::memory::format_tag GetMKLDNNFormat(
    mkldnn::memory::desc mem_desc) {
  auto ndims = mem_desc.data.ndims;
  auto strides = mem_desc.data.format_desc.blocking.strides;
  auto inner_nblks = mem_desc.data.format_desc.blocking.inner_nblks;
  auto inner_blks = mem_desc.data.format_desc.blocking.inner_blks;
  auto inner_idxs = mem_desc.data.format_desc.blocking.inner_idxs;

  if (ndims == 1) {
    return mkldnn::memory::format_tag::x;
  } else if (ndims == 2) {
    if (inner_nblks == 0) {
      if (strides[0] >= strides[1]) {
        return mkldnn::memory::format_tag::nc;
      } else {
        return mkldnn::memory::format_tag::cn;
      }
    }
  } else if (ndims == 3) {
    if (inner_nblks == 0) {
      if (strides[0] >= strides[1] && strides[1] >= strides[2]) {
        return mkldnn::memory::format_tag::ncw;
      } else {
        return mkldnn::memory::format_tag::nwc;
      }
    }
  } else if (ndims == 4) {
    if (inner_nblks == 0) {
      if (strides[0] >= strides[1] && strides[1] >= strides[2] &&
          strides[2] >= strides[3]) {
        return mkldnn::memory::format_tag::nchw;
      } else {
        return mkldnn::memory::format_tag::nhwc;
      }
    } else if (inner_nblks == 1) {
      if (inner_blks[0] == 16 && inner_idxs[0] == 1) {
        return mkldnn::memory::format_tag::nChw16c;
      } else if (inner_blks[0] == 8 && inner_idxs[0] == 1) {
        return mkldnn::memory::format_tag::nChw8c;
      } else if (inner_blks[0] == 8 && inner_idxs[0] == 0) {
        if (strides[0] >= strides[2] && strides[2] >= strides[3] &&
            strides[3] >= strides[1]) {
          return mkldnn::memory::format_tag::Acdb8a;
        }
      } else if (inner_blks[0] == 4 && inner_idxs[0] == 1) {
        return mkldnn::memory::format_tag::nChw4c;
      } else if (inner_blks[0] == 16 && inner_idxs[0] == 0) {
        if (strides[0] >= strides[2] && strides[2] >= strides[3] &&
            strides[3] >= strides[1]) {
          return mkldnn::memory::format_tag::Acdb16a;
        }
      }
    } else if (inner_nblks == 2) {
      if (inner_blks[0] == 16 && inner_blks[1] == 16) {
        if (inner_idxs[0] == 1 && inner_idxs[1] == 0) {
          return mkldnn::memory::format_tag::OIhw16i16o;
        }
      } else if (inner_blks[0] == 8 && inner_blks[1] == 8) {
        if (inner_idxs[0] == 1 && inner_idxs[1] == 0) {
          return mkldnn::memory::format_tag::OIhw8i8o;
        }
      }
    }
  } else if (ndims == 5) {
    if (inner_nblks == 0) {
      if (strides[0] >= strides[1] && strides[1] >= strides[2] &&
          strides[2] >= strides[3] && strides[3] >= strides[4]) {
        return mkldnn::memory::format_tag::ncdhw;
      } else {
        return mkldnn::memory::format_tag::ndhwc;
      }
    } else if (inner_nblks == 1) {
      if (inner_blks[0] == 8 && inner_idxs[0] == 0) {
        if (strides[0] >= strides[2] && strides[2] >= strides[3] &&
            strides[3] >= strides[4] && strides[4] >= strides[1]) {
          return mkldnn::memory::format_tag::Acdeb8a;
        }
      } else if (inner_blks[0] == 8 && inner_idxs[0] == 1) {
        if (strides[0] >= strides[1] && strides[1] >= strides[2] &&
            strides[2] >= strides[3] && strides[3] >= strides[4]) {
          return mkldnn::memory::format_tag::aBcde8b;
        }
      } else if (inner_blks[0] == 16 && inner_idxs[0] == 0) {
        if (strides[0] >= strides[2] && strides[2] >= strides[3] &&
            strides[3] >= strides[4] && strides[4] >= strides[1]) {
          return mkldnn::memory::format_tag::Acdeb16a;
        }
      } else if (inner_blks[0] == 16 && inner_idxs[0] == 1) {
        if (strides[0] >= strides[1] && strides[1] >= strides[2] &&
            strides[2] >= strides[3] && strides[3] >= strides[4]) {
          return mkldnn::memory::format_tag::aBcde16b;
        }
      }
    }
  } else if (ndims == 6) {
    if (inner_nblks == 0) {
      if (strides[0] >= strides[1] && strides[1] >= strides[2] &&
          strides[2] >= strides[3] && strides[3] >= strides[4] &&
          strides[4] >= strides[5]) {
        return mkldnn::memory::format_tag::abcdef;
      }
    }
  }
  // DEBUG CODE - KEEP UNTIL TENSOR.MEMORY_DESC IS IMPLEMENTED
  // std::cout<<"@@@@@@@@@@ UNDEFINED FORMAT @@@@@@@@@@@@@@@@@@@"<<std::endl;
  // std::cout<<"NDIMS: "<<ndims<<std::endl;
  // std::cout<<"INNER_NBLKS: "<<inner_nblks<<std::endl;
  // for (int i=0;i<ndims;++i) {
  //   std::cout<<"STRIDE["<<i<<"]: "<<strides[i]<<std::endl;
  // }
  // for (int i=0;i<inner_nblks;++i) {
  //   std::cout<<"INNER_BLKS["<<i<<"]: "<<inner_blks[i]<<std::endl;
  // }
  // for (int i=0;i<inner_nblks;++i) {
  //   std::cout<<"INNER_IDXS["<<i<<"]: "<<inner_idxs[i]<<std::endl;
  // }
  return mkldnn::memory::format_tag::undef;
}

inline mkldnn::memory::format_tag GetMKLDNNFormat(
    const mkldnn::memory& memory) {
  auto mem_desc = memory.get_desc();
  return GetMKLDNNFormat(mem_desc);
}

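// Adapt a (typically 4-D) format hint to a tensor of rank dims_size, e.g.
// nchw becomes ncw for 3-D data and ncdhw for 5-D data, and the grouped
// weight hints goihw/goidhw are reduced to oihw/oidhw. Combinations that
// are not listed are returned unchanged.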
inline MKLDNNMemoryFormat MKLDNNFormatForSize(size_t dims_size,
                                              MKLDNNMemoryFormat data_format) {
  if (dims_size == 1) {
    return MKLDNNMemoryFormat::x;
  } else if (dims_size == 2) {
    return MKLDNNMemoryFormat::nc;
  } else if (dims_size == 3) {
    if (data_format == MKLDNNMemoryFormat::nchw) {
      return MKLDNNMemoryFormat::ncw;
    } else if (data_format == MKLDNNMemoryFormat::nhwc) {
      return MKLDNNMemoryFormat::nwc;
    }
  } else if (dims_size == 4) {
    if (data_format == MKLDNNMemoryFormat::goihw) {
      return MKLDNNMemoryFormat::oihw;
    }
  } else if (dims_size == 5) {
    if (data_format == MKLDNNMemoryFormat::goidhw) {
      return MKLDNNMemoryFormat::oidhw;
    }
    if (data_format == MKLDNNMemoryFormat::nchw) {
      return MKLDNNMemoryFormat::ncdhw;
    } else if (data_format == MKLDNNMemoryFormat::nhwc) {
      return MKLDNNMemoryFormat::ndhwc;
    }
  }
  return data_format;
}

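// Map Paddle's string data_format attribute to an MKL-DNN memory format;
// layouts other than NHWC and NCHW fall back to `any`.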
inline MKLDNNMemoryFormat data_format_to_memory_format(
    const std::string& data_format) {
  switch (framework::StringToDataLayout(data_format)) {
    case framework::DataLayout::kNHWC:
      return MKLDNNMemoryFormat::nhwc;
    case framework::DataLayout::kNCHW:
      return MKLDNNMemoryFormat::nchw;
    default:
      return MKLDNNMemoryFormat::any;
  }
}

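// Lowercase *format in place and translate it to an MKL-DNN memory format;
// unrecognized names fall back to `any`.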
inline MKLDNNMemoryFormat StringToMKLDNNFormat(std::string* format) {
  std::transform(format->begin(), format->end(), format->begin(), ::tolower);

  if (!format->compare("nchw")) {
    return MKLDNNMemoryFormat::nchw;
  } else if (!format->compare("nchw16c")) {
    return MKLDNNMemoryFormat::nChw16c;
  } else if (!format->compare("nchw8c")) {
    return MKLDNNMemoryFormat::nChw8c;
  } else if (!format->compare("nhwc")) {
    return MKLDNNMemoryFormat::nhwc;
  } else {
    return MKLDNNMemoryFormat::any;
  }
}

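// Return the current thread id hashed into a string, suitable for making
// the cache keys built below unique per thread.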
inline std::string ThreadIDasStr() {
  return std::to_string(
      std::hash<std::thread::id>()(std::this_thread::get_id()));
}

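// The AppendKey overloads serialize a single value into a cache-key
// string; the specializations below cover the MKL-DNN enum types by
// appending their integer representation.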
template <typename T>
inline void AppendKey(std::string* key, const T& num) {
  key->append(std::to_string(num));
}

template <>
inline void AppendKey(std::string* key,
                      const mkldnn::memory::format_tag& format) {
  key->append(std::to_string(static_cast<int>(format)));
}

template <>
inline void AppendKey(std::string* key,
                      const mkldnn::memory::data_type& data_type) {
  key->append(std::to_string(static_cast<int>(data_type)));
}

template <>
inline void AppendKey(std::string* key, const mkldnn::algorithm& algorithm) {
  key->append(std::to_string(static_cast<int>(algorithm)));
}

template <>
inline void AppendKey(std::string* key,
                      const mkldnn::normalization_flags& flags) {
  key->append(std::to_string(static_cast<int>(flags)));
}

inline void AppendKey(std::string* key, const std::string& str) {
  key->append(str);
}

inline void AppendKey(std::string* key, const char* str) { key->append(str); }

template <typename T>
inline void AppendKey(std::string* key, const std::vector<T>& dims) {
  for (size_t i = 0; i < dims.size(); i++) {
    AppendKey(key, std::to_string(dims[i]));
  }
}

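// Concatenate all arguments into one cache-key string; the int-array pack
// expansion simply calls AppendKey once per argument, in order. A usage
// sketch (illustrative; `src_dims` is assumed to be a std::vector<int64_t>):
//   std::string key = CreateKey(ThreadIDasStr(), "conv2d", src_dims,
//                               MKLDNNGetDataType<float>());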
template <typename... ArgTypes>
inline std::string CreateKey(ArgTypes&&... args) {
  std::string key;
  key.reserve(64);
  using expand_type = int[];
  expand_type{0, (AppendKey(&key, std::forward<ArgTypes>(args)), 0)...};
  return key;
}

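// Convert Paddle's flat padding attribute ({top, bottom, left, right}, or
// {front, back, top, bottom, left, right} for 3-D spatial data) into the
// {left-side, right-side} padding vectors MKL-DNN expects; e.g.
// {1, 2, 3, 4} becomes {{1, 3}, {2, 4}}.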
inline std::vector<std::vector<int64_t>> ToMkldnnPadding(
    const std::vector<int64_t>& paddings) {
  if (paddings.size() == 6) {
    int padding_front = paddings[0];
    int padding_back = paddings[1];
    int padding_top = paddings[2];
    int padding_bottom = paddings[3];
    int padding_left = paddings[4];
    int padding_right = paddings[5];

    return {{padding_front, padding_top, padding_left},
            {padding_back, padding_bottom, padding_right}};
  } else {
    int padding_top = paddings[0];
    int padding_bottom = paddings[1];
    int padding_left = paddings[2];
    int padding_right = paddings[3];

    return {{padding_top, padding_left}, {padding_bottom, padding_right}};
  }
}

}  // namespace platform
}  // namespace paddle