Unverified · Commit 269bd1fe · authored by piotrekobi · committed by GitHub

[PHI] Move oneDNN helper classes to new location (#45626)

* gaussian random

* mkldnn to onednn renaming

* fix merge conflicts

* remove fluid code

* onednn renaming

* Move classes from mkldnn_reuse.h to onednn_reuse.h

* Move more functions from mkldnn_helper.h to onednn_helper.h

* Change MKLDNN to OneDNN in VLOG message
Co-authored-by: Silv3S <slawomir.siwek@intel.com>
Parent 4e4f4586
...
@@ -212,10 +212,10 @@ std::shared_ptr<OperatorBase> TransferLayout(const std::string& var_name,
     out_layout = framework::DataLayout::kNCHW;
   }
 
-  if (in_layout == framework::DataLayout::MKLDNN &&
-      out_layout != framework::DataLayout::MKLDNN) {
+  if (in_layout == framework::DataLayout::ONEDNN &&
+      out_layout != framework::DataLayout::ONEDNN) {
     auto target_layout = phi::OneDNNContext::tls().get_cur_paddle_data_layout();
-    VLOG(4) << "TransDataLayoutFromMKLDNN: " << in_layout << "->"
+    VLOG(4) << "TransDataLayoutFromOneDNN: " << in_layout << "->"
             << target_layout;
     if (out_layout == DataLayout::kNCHW &&
...
...
@@ -75,7 +75,7 @@ TEST(PhiUtils, TransOpKernelTypeToPhiKernelKey) {
   auto kernel_key_mkldnn =
       paddle::framework::TransOpKernelTypeToPhiKernelKey(op_kernel_type_mkldnn);
   ASSERT_EQ(kernel_key_mkldnn.dtype(), phi::DataType::FLOAT32);
-  ASSERT_EQ(kernel_key_mkldnn.layout(), phi::DataLayout::MKLDNN);
+  ASSERT_EQ(kernel_key_mkldnn.layout(), phi::DataLayout::ONEDNN);
   ASSERT_EQ(kernel_key_mkldnn.backend(), phi::Backend::ONEDNN);
 #endif
...
...
@@ -56,7 +56,7 @@ BackendSet GetTensorBackendSet(const phi::TensorBase& t) {
   if (HasAllocation(t) && t.place().GetType() != AllocationType::UNDEFINED) {
     BackendSet backend_set(phi::TransToPhiBackend(t.place()));
     switch (t.layout()) {
-      case DataLayout::MKLDNN:
+      case DataLayout::ONEDNN:
         backend_set = backend_set | BackendSet(Backend::ONEDNN);
         break;
       default:
...
...
@@ -14,6 +14,7 @@
 #pragma once
 
+#include <thread>
 
 #include "dnnl.hpp"  // NOLINT
 #include "glog/logging.h"
...
@@ -94,6 +95,106 @@ inline dnnl::memory::format_tag GetPlainOneDNNFormat(int tensor_rank) {
   }
 }
 
+template <typename Type>
+dnnl::memory::data_type oneDNNGetDataType() {
+  return dnnl::memory::data_type::undef;
+}
+
+template <>
+inline dnnl::memory::data_type oneDNNGetDataType<float>() {
+  return dnnl::memory::data_type::f32;
+}
+template <>
+inline dnnl::memory::data_type oneDNNGetDataType<int32_t>() {
+  return dnnl::memory::data_type::s32;
+}
+template <>
+inline dnnl::memory::data_type oneDNNGetDataType<int8_t>() {
+  return dnnl::memory::data_type::s8;
+}
+template <>
+inline dnnl::memory::data_type oneDNNGetDataType<uint8_t>() {
+  return dnnl::memory::data_type::u8;
+}
+template <>
+inline dnnl::memory::data_type oneDNNGetDataType<dtype::bfloat16>() {
+  return dnnl::memory::data_type::bf16;
+}
+
+inline std::vector<std::vector<int64_t>> ToOneDNNPadding(
+    const std::vector<int64_t>& paddings) {
+  if (paddings.size() == 6) {
+    int padding_front = paddings[0];
+    int padding_back = paddings[1];
+    int padding_top = paddings[2];
+    int padding_bottom = paddings[3];
+    int padding_left = paddings[4];
+    int padding_right = paddings[5];
+
+    return {{padding_front, padding_top, padding_left},
+            {padding_back, padding_bottom, padding_right}};
+  } else {
+    int padding_top = paddings[0];
+    int padding_bottom = paddings[1];
+    int padding_left = paddings[2];
+    int padding_right = paddings[3];
+
+    return {{padding_top, padding_left}, {padding_bottom, padding_right}};
+  }
+}
+
+template <typename T>
+inline void AppendKey(std::string* key, const T& num) {
+  key->append(std::to_string(num));
+}
+
+template <>
+inline void AppendKey(std::string* key,
+                      const dnnl::memory::format_tag& format) {
+  key->append(std::to_string(static_cast<int>(format)));
+}
+
+template <>
+inline void AppendKey(std::string* key,
+                      const dnnl::memory::data_type& data_type) {
+  key->append(std::to_string(static_cast<int>(data_type)));
+}
+
+template <>
+inline void AppendKey(std::string* key, const dnnl::algorithm& algorithm) {
+  key->append(std::to_string(static_cast<int>(algorithm)));
+}
+
+template <>
+inline void AppendKey(std::string* key,
+                      const dnnl::normalization_flags& flags) {
+  key->append(std::to_string(static_cast<int>(flags)));
+}
+
+inline void AppendKey(std::string* key, const std::string& str) {
+  key->append(str);
+}
+
+inline void AppendKey(std::string* key, const char* str) { key->append(str); }
+
+template <typename T>
+inline void AppendKey(std::string* key, const std::vector<T>& dims) {
+  for (size_t i = 0; i < dims.size(); i++) {
+    AppendKey(key, std::to_string(dims[i]));
+  }
+}
+
+template <typename... ArgTypes>
+inline std::string CreateKey(const OneDNNContext& dev_ctx, ArgTypes&&... args) {
+  std::string key;
+  key.reserve(64);
+  using expand_type = int[];
+  expand_type{0, (AppendKey(&key, std::forward<ArgTypes>(args)), 0)...};
+  key += OneDNNContext::tls().get_key_suffix();
+  return key;
+}
+
 inline void MatchShapeToLayout(DenseTensor* tensor_in,
                                DataLayout from,
                                DataLayout to) {
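Reviewer note: the helpers moved above compose as follows — CreateKey expands its argument pack and routes each argument to the matching AppendKey overload, while ToOneDNNPadding regroups a flattened padding vector into begin/end pairs. A minimal sketch of a call site (illustrative only, not part of this patch; dev_ctx and all literal values are assumptions):

```cpp
// Illustrative sketch -- every helper used here is defined in the hunk above.
// "dev_ctx" is an assumed phi::OneDNNContext reference.
std::vector<int64_t> src_dims = {8, 3, 224, 224};  // assumed tensor shape
std::string key = phi::funcs::CreateKey(
    dev_ctx,
    src_dims,                                // vector<T> overload
    phi::funcs::oneDNNGetDataType<float>(),  // data_type overload -> f32
    dnnl::memory::format_tag::nchw,          // format_tag overload
    "log_softmax");                          // const char* overload
// The thread id is appended only when the thread-local flag requests it:
key = phi::funcs::ExtendKeyWithThreadInfoIfNeeded(dev_ctx, key);

// ToOneDNNPadding regroups {top, bottom, left, right} into begin/end pairs:
auto onednn_paddings = phi::funcs::ToOneDNNPadding({1, 2, 3, 4});
// -> {{1, 3}, {2, 4}}, i.e. {top, left} and {bottom, right}
```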
...
@@ -117,28 +218,28 @@ inline void MatchShapeToLayout(DenseTensor* tensor_in,
   // at last nhwC, so for dim==2 these layouts are the same and nothing should
   // be done. Similarly for dim==1 when you have just one possible combination.
   if (tensor_in->dims().size() < 3) {
-    VLOG(3) << "Keeping MKLDNN/NHWC/NDHWC output_shape"
+    VLOG(3) << "Keeping ONEDNN/NHWC/NDHWC output_shape"
             << print_dims(phi::vectorize<int>(tensor_in->dims()));
     return;
   }
 
   switch (from) {
-    case DataLayout::MKLDNN:
+    case DataLayout::ONEDNN:
       if ((to == DataLayout::NHWC) || (to == DataLayout::NDHWC)) {
         auto dims = phi::vectorize<int>(tensor_in->dims());
         std::rotate(dims.begin() + 1, dims.begin() + 2, dims.end());
         tensor_in->Resize(phi::make_ddim(dims));
-        VLOG(3) << "Rotating Shape from: MKLDNN to: NHWC/NDHWC output_shape"
+        VLOG(3) << "Rotating Shape from: ONEDNN to: NHWC/NDHWC output_shape"
                 << print_dims(dims);
       }
       break;
     case DataLayout::NHWC:
     case DataLayout::NDHWC:
-      if (to == DataLayout::MKLDNN) {
+      if (to == DataLayout::ONEDNN) {
         auto dims = phi::vectorize<int>(tensor_in->dims());
         std::rotate(dims.begin() + 1, dims.end() - 1, dims.end());
         tensor_in->Resize(phi::make_ddim(dims));
-        VLOG(3) << "Rotating Shape from: NHWC/NDHWC to: MKLDNN output_shape"
+        VLOG(3) << "Rotating Shape from: NHWC/NDHWC to: ONEDNN output_shape"
                 << print_dims(dims);
       }
       break;
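Reviewer note: the two std::rotate calls above are mutual inverses. ONEDNN keeps dims in NCHW order, so the channel axis is rotated to the back when reporting NHWC/NDHWC shapes, and back to the front in the other direction. A standalone sketch of the same rotation (illustrative only):

```cpp
#include <algorithm>
#include <vector>

int main() {
  std::vector<int> dims = {2, 3, 4, 5};  // NCHW-ordered dims

  // ONEDNN -> NHWC: move channel to the back: {2, 3, 4, 5} -> {2, 4, 5, 3}
  std::rotate(dims.begin() + 1, dims.begin() + 2, dims.end());

  // NHWC -> ONEDNN: move channel to the front: {2, 4, 5, 3} -> {2, 3, 4, 5}
  std::rotate(dims.begin() + 1, dims.end() - 1, dims.end());
}
```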
...
@@ -158,5 +259,22 @@ inline dnnl::memory::desc OneDNNMemDesc(const std::vector<int64_t>& dims,
   return dnnl::memory::desc({dims}, data_type, format);
 }
 
+inline std::string ThreadIDasStr(void) {
+  return std::to_string(
+      std::hash<std::thread::id>()(std::this_thread::get_id()));
+}
+
+inline std::string ExtendKeyWithThreadInfoIfNeeded(const OneDNNContext& dev_ctx,
+                                                   const std::string& key) {
+  return (OneDNNContext::tls().is_tid_used_in_key() == true)
+             ? key + "-t:" + ThreadIDasStr()
+             : key;
+}
+
+template <typename T>
+bool constexpr is_int8() {
+  return std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value;
+}
+
 }  // namespace funcs
 }  // namespace phi
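Reviewer note: is_int8 is constexpr, so quantization-specific branches resolve at compile time, and because oneDNNGetDataType has a generic undef fallback, a plain ternary compiles for any T. A hedged sketch of assumed usage (OutputDataType is a hypothetical helper, not part of this patch; the include path is assumed from the new file location):

```cpp
#include "paddle/phi/backends/onednn/onednn_helper.h"  // assumed new location

// Hypothetical helper: quantized kernels keep the int8 type on output,
// everything else falls back to f32.
template <typename T>
dnnl::memory::data_type OutputDataType() {
  return phi::funcs::is_int8<T>() ? phi::funcs::oneDNNGetDataType<T>()
                                  : dnnl::memory::data_type::f32;
}

// Being constexpr, the predicate also works in static_assert:
static_assert(phi::funcs::is_int8<uint8_t>(), "u8 counts as an int8 type");
static_assert(!phi::funcs::is_int8<float>(), "f32 does not");
```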
...
@@ -32,7 +32,7 @@ namespace experimental {
  * more specific, we need to distinguish the calculation method.
  *
  * Such as the kernel for CPU device, it can be a native CPU kernel,
- * or a kernel implemented by MKLDNN library.
+ * or a kernel implemented by oneDNN library.
  *
  * Note(chenweihang): HIP is not needed now, we can add it if needed
  * in the future
...
...
@@ -40,7 +40,7 @@ enum class DataLayout {
   NCHW,
   NCDHW,
   NDHWC,
-  MKLDNN,
+  ONEDNN,
   SPARSE_COO,
   SPARSE_CSR,
   PSTRING_UNION,
...
@@ -62,7 +62,7 @@ enum class DataLayout {
   kAnyLayout = ANY,
   kNHWC = NHWC,
   kNCHW = NCHW,
-  kMKLDNN = MKLDNN,  // all layouts supported by MKLDNN internally
+  kMKLDNN = ONEDNN,  // all layouts supported by ONEDNN internally
   kNDHWC = NDHWC,
   kNCDHW = NCDHW,
 };
...
...
@@ -14,8 +14,8 @@
 #include "paddle/phi/kernels/log_softmax_kernel.h"
 
-#include "paddle/fluid/platform/mkldnn_reuse.h"
 #include "paddle/phi/backends/onednn/onednn_context.h"
+#include "paddle/phi/backends/onednn/onednn_reuse.h"
 #include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/kernel_registry.h"
...
@@ -23,16 +23,15 @@
 namespace phi {
 
 template <typename T>
-class LogSoftmaxMKLDNNHandler
-    : public paddle::platform::
-          MKLDNNHandlerNoCachingT<T, dnnl::logsoftmax_forward> {
+class LogSoftmaxOneDNNHandler
+    : public funcs::OneDNNHandlerNoCachingT<T, dnnl::logsoftmax_forward> {
  public:
-  LogSoftmaxMKLDNNHandler(const dnnl::engine mkldnn_engine,
+  LogSoftmaxOneDNNHandler(const dnnl::engine onednn_engine,
                           Place cpu_place,
                           const DenseTensor& x,
                           const int axis)
-      : paddle::platform::MKLDNNHandlerNoCachingT<T, dnnl::logsoftmax_forward>(
-            mkldnn_engine, cpu_place) {
+      : funcs::OneDNNHandlerNoCachingT<T, dnnl::logsoftmax_forward>(
+            onednn_engine, cpu_place) {
     this->AcquireForwardPrimitiveDescriptor(
         dnnl::prop_kind::forward_inference, x.mem_desc(), axis);
   }
...
@@ -43,11 +42,11 @@ void LogSoftmaxKernel(const Context& dev_ctx,
                       const DenseTensor& x,
                       int axis,
                       DenseTensor* out) {
-  const auto& mkldnn_engine = dev_ctx.GetEngine();
+  const auto& onednn_engine = dev_ctx.GetEngine();
   axis = axis >= 0 ? axis : x.dims().size() + axis;
 
-  LogSoftmaxMKLDNNHandler<T> handler(
-      mkldnn_engine, dev_ctx.GetPlace(), x, axis);
+  LogSoftmaxOneDNNHandler<T> handler(
+      onednn_engine, dev_ctx.GetPlace(), x, axis);
   auto src_memory_p = handler.AcquireSrcMemory(&x);
   auto dst_memory_p = handler.AcquireDstMemory(out);
...
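Reviewer note: the hunk above is truncated before the execution step. For context, the usual tail of a handler-based oneDNN kernel looks roughly like the sketch below; AcquireForwardPrimitive and the thread-local stream accessor are assumed to follow the OneDNNHandlerNoCachingT API, and this is not the elided patch content:

```cpp
// Hedged sketch of a typical oneDNN execution tail (assumed API).
auto logsoftmax_p = handler.AcquireForwardPrimitive();

auto& astream = OneDNNContext::tls().get_stream();
logsoftmax_p->execute(
    astream, {{DNNL_ARG_SRC, *src_memory_p}, {DNNL_ARG_DST, *dst_memory_p}});
astream.wait();

out->set_mem_desc(dst_memory_p->get_desc());
```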
...
@@ -97,7 +97,7 @@ void TransferLayoutMKLDNN(const Context& dev_ctx,
   // NOTE(zhiqiu): to handle the special case in ApplyDataTransform() in
   // data_transfer.cc
-  if (!x.IsInitialized() && src_layout == DataLayout::MKLDNN &&
+  if (!x.IsInitialized() && src_layout == DataLayout::ONEDNN &&
       dst_layout == DataLayout::NHWC) {
     VLOG(4) << src_layout << "->" << dst_layout << " " << x.layout();
     out->Resize(x.dims());
...
@@ -106,7 +106,7 @@ void TransferLayoutMKLDNN(const Context& dev_ctx,
     return;
   }
 
-  if (src_layout != DataLayout::MKLDNN && dst_layout == DataLayout::MKLDNN) {
+  if (src_layout != DataLayout::ONEDNN && dst_layout == DataLayout::ONEDNN) {
     // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel
     // Just set layout/format. No real transform occur
     auto out_format = funcs::OneDNNFormatForSize(
...
@@ -121,16 +121,16 @@ void TransferLayoutMKLDNN(const Context& dev_ctx,
       OneDNNContext::tls().set_cur_paddle_data_layout(src_layout);
     }
 
-    out->set_layout(DataLayout::MKLDNN);
+    out->set_layout(DataLayout::ONEDNN);
     out->set_format(out_format);
-  } else if (src_layout == DataLayout::MKLDNN &&
-             dst_layout != DataLayout::MKLDNN) {
+  } else if (src_layout == DataLayout::ONEDNN &&
+             dst_layout != DataLayout::ONEDNN) {
     // Case2 - transform from MKLDNN OPKernel to Non-MKLDNN OPKernel
     // Do transform via MKLDNN lib
     funcs::innerTransDataLayoutFromOneDNN(
         src_layout, dst_layout, x, out, dev_ctx.GetPlace());
-  } else if (src_layout == DataLayout::MKLDNN &&
-             dst_layout == DataLayout::MKLDNN) {
+  } else if (src_layout == DataLayout::ONEDNN &&
+             dst_layout == DataLayout::ONEDNN) {
     PADDLE_ENFORCE_NE(
         src_layout,
         dst_layout,
...
...
@@ -37,7 +37,7 @@ TEST(DataLayout, OStream) {
   oss << phi::DataLayout::NCHW;
   EXPECT_EQ(oss.str(), "NCHW");
   oss.str("");
-  oss << phi::DataLayout::MKLDNN;
+  oss << phi::DataLayout::ONEDNN;
   EXPECT_EQ(oss.str(), "MKLDNN");
   oss.str("");
   try {
...
...
@@ -40,7 +40,7 @@ TEST(DEV_API, transfer_layout) {
   DenseTensor x;
   MetaTensor meta_x(&x);
   meta_x.set_dtype(DataType::FLOAT32);
-  meta_x.set_layout(DataLayout::MKLDNN);
+  meta_x.set_layout(DataLayout::ONEDNN);
   meta_x.set_dims(make_ddim({n, c, h, w}));
 
   DenseTensor out;
...