Unverified commit 1ba81500, authored by piotrekobiIntel, committed by GitHub

Add third batch of deprecated mkldnn namespace name changes (#37558)

Parent 6b8a6220
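Context for the diff below: oneDNN was formerly named MKL-DNN, and the `mkldnn::` spelling survives only as a deprecated alias of `dnnl::` (Paddle's own `namespace mkldnn = dnnl;` shim is deleted in the final hunk), so each change in this batch is a pure rename with no behavior change. A minimal sketch of the new spelling against plain oneDNN; the shapes are chosen only for illustration:

```cpp
#include "dnnl.hpp"

int main() {
  // Old spelling, valid only while the mkldnn -> dnnl alias existed:
  //   mkldnn::engine cpu_engine(mkldnn::engine::kind::cpu, 0);
  // New spelling, used throughout this patch:
  dnnl::engine cpu_engine(dnnl::engine::kind::cpu, 0);

  // Plain NCHW f32 descriptor, mirroring the format_tag usage below.
  dnnl::memory::desc md({1, 3, 224, 224}, dnnl::memory::data_type::f32,
                        dnnl::memory::format_tag::nchw);
  dnnl::memory mem(md, cpu_engine);  // engine-backed allocation
  return 0;
}
```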
@@ -48,7 +48,7 @@ void* AlignedMalloc(size_t size) {
void* p = nullptr;
size_t alignment = 32ul;
#ifdef PADDLE_WITH_MKLDNN
-// refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp
+// refer to https://github.com/01org/mkl-dnn/blob/master/include/dnnl.hpp
// memory alignment
alignment = 4096ul;
#endif
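As context, `AlignedMalloc` above bumps the alignment to 4096 bytes when oneDNN is enabled, citing the renamed header. A standalone sketch of the same policy in standard C++17 (the padding logic is an assumption of this sketch, not from the patch):

```cpp
#include <cstddef>
#include <cstdlib>

// Hypothetical stand-in for AlignedMalloc's alignment policy.
void* AlignedMallocSketch(std::size_t size) {
  std::size_t alignment = 32;
#ifdef PADDLE_WITH_MKLDNN
  alignment = 4096;  // matches the oneDNN branch in the hunk above
#endif
  // std::aligned_alloc requires size to be a multiple of the alignment.
  std::size_t padded = (size + alignment - 1) / alignment * alignment;
  return std::aligned_alloc(alignment, padded);
}
```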
@@ -30,11 +30,10 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler<T, dnnl::gru_forward, T_out> {
public:
GRUMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
const platform::MKLDNNDeviceContext& dev_ctx,
-const mkldnn::engine mkldnn_engine,
-platform::Place cpu_place, const LoDTensor* input,
-const Tensor* weight_h, const Tensor* h0,
-const bool is_reverse, const int64_t N, const int64_t Ti,
-const int64_t IC, const int64_t OC,
+const dnnl::engine mkldnn_engine, platform::Place cpu_place,
+const LoDTensor* input, const Tensor* weight_h,
+const Tensor* h0, const bool is_reverse, const int64_t N,
+const int64_t Ti, const int64_t IC, const int64_t OC,
const std::string& unique_name)
: RNNMKLDNNHandler<T, dnnl::gru_forward, T_out>(
ctx, dev_ctx, mkldnn_engine, ctx.GetPlace(), input, weight_h, h0,
@@ -730,7 +730,7 @@ class MatMulOp : public framework::OperatorWithKernel {
OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");
#ifdef PADDLE_WITH_MKLDNN
-using mkldnn::memory;
+using dnnl::memory;
if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
return framework::OpKernelType(input_data_type, ctx.GetPlace(),
framework::DataLayout::kMKLDNN,
@@ -36,9 +36,8 @@ class CastMKLDNNKernel : public framework::OpKernel<T> {
auto x_paddle_type = framework::proto::VarType::Type(in_dtype);
auto out_paddle_type = framework::proto::VarType::Type(out_dtype);
-mkldnn::memory::data_type x_type =
-framework::ToMKLDNNDataType(x_paddle_type);
-mkldnn::memory::data_type out_type =
+dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x_paddle_type);
+dnnl::memory::data_type out_type =
framework::ToMKLDNNDataType(out_paddle_type);
auto x_tz = framework::vectorize(x->dims());
@@ -76,9 +76,9 @@ class ExpandMKLDNNKernel : public paddle::framework::OpKernel<T> {
private:
dnnl::memory::format_tag GetExtendedFormatTag(
std::vector<int64_t>& dims, int new_size,
-mkldnn::memory::format_tag format_tag) const {
-mkldnn::memory::desc md(dims, paddle::platform::MKLDNNGetDataType<T>(),
-format_tag);
+dnnl::memory::format_tag format_tag) const {
+dnnl::memory::desc md(dims, paddle::platform::MKLDNNGetDataType<T>(),
+format_tag);
std::vector<int64_t> new_dims(new_size, 1);
std::copy(dims.begin(), dims.end(),
new_dims.begin() + new_size - dims.size());
@@ -112,7 +112,7 @@ class ExpandGradMKLDNNKernel : public paddle::framework::OpKernel<T> {
auto& astream = MKLDNNDeviceContext::tls().get_stream();
if (dout_vec_dims == dx_vec_dims) {
-mkldnn::memory::data_type dout_type =
+dnnl::memory::data_type dout_type =
paddle::framework::ToMKLDNNDataType(dout->type());
paddle::platform::ReorderMKLDNNHandler reorder_handler(
dout_vec_dims, dout->type(), dout_type, onednn_engine);
@@ -43,7 +43,7 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
}
tensor->set_layout(DataLayout::kMKLDNN);
-tensor->set_format(mkldnn::memory::format_tag::oihw);
+tensor->set_format(dnnl::memory::format_tag::oihw);
}
};
} // namespace operators
@@ -25,7 +25,7 @@ class LayerNormMKLDNNHandler : public platform::MKLDNNHandlerNoCachingT<
LayerNormMKLDNNHandler(const std::vector<int64_t>& dims, const float& epsilon,
const dnnl::normalization_flags& flags,
const bool& is_test, const MKLDNNMemoryFormat fmt,
-const mkldnn::engine engine, platform::Place cpu_place)
+const dnnl::engine engine, platform::Place cpu_place)
: platform::MKLDNNHandlerNoCachingT<T, dnnl::layer_normalization_forward>(
engine, cpu_place) {
auto md = dnnl::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
@@ -131,7 +131,7 @@ class LayerNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
}
if (with_scaleshift) {
-std::shared_ptr<mkldnn::memory> scaleshift_memory =
+std::shared_ptr<dnnl::memory> scaleshift_memory =
handler.AcquireScaleShiftMemory(scale, bias);
args.insert({DNNL_ARG_SCALE_SHIFT, *scaleshift_memory});
}
@@ -32,7 +32,7 @@ class MatMulGradMKLDNNKernel : public framework::OpKernel<T> {
private:
void ExecuteMatMulGrad(const ExecutionContext& ctx,
const MKLDNNDeviceContext& dev_ctx,
-const mkldnn::engine& engine, Tensor* x, bool trans_x,
+const dnnl::engine& engine, Tensor* x, bool trans_x,
bool is_fold_init_dims_x, Tensor* y, bool trans_y,
bool is_fold_init_dims_y, Tensor* out) const;
void RunKernel(const ExecutionContext& ctx) const;
@@ -33,7 +33,7 @@ template <typename T>
class MatMulV2MKLDNNHandler
: public paddle::platform::MKLDNNHandlerNoCachingT<T, dnnl::matmul> {
public:
-MatMulV2MKLDNNHandler(const mkldnn::engine engine,
+MatMulV2MKLDNNHandler(const dnnl::engine engine,
paddle::platform::Place cpu_place,
const std::vector<int64_t>& x_org_dims, bool trans_x,
const std::vector<int64_t>& y_org_dims, bool trans_y,
@@ -132,7 +132,7 @@ class MatMulV2MKLDNNKernel
protected:
void ExecuteMatMul(const ExecutionContext& ctx,
const MKLDNNDeviceContext& dev_ctx,
-const mkldnn::engine onednn_engine,
+const dnnl::engine onednn_engine,
paddle::platform::Place cpu_place, const Tensor* x,
std::vector<int64_t>& x_dims, bool trans_x,
const Tensor* y, std::vector<int64_t>& y_dims,
@@ -272,7 +272,7 @@ class MatMulV2GradMKLDNNKernel : public MatMulV2MKLDNNKernel<T> {
void ReduceSumForMatmulGradOutput(const ExecutionContext& ctx,
const MKLDNNDeviceContext& dev_ctx,
-const mkldnn::engine onednn_engine,
+const dnnl::engine onednn_engine,
const Tensor* dx_tmp, Tensor* dx,
std::vector<int64_t> dx_dims) const {
paddle::platform::ReductionMKLDNNHandler<T> handler(
@@ -31,7 +31,7 @@ class PReluMKLDNNHandler
dnnl::prelu_backward> {
public:
PReluMKLDNNHandler(const MKLDNNDeviceContext& dev_ctx,
-const mkldnn::engine engine, platform::Place cpu_place,
+const dnnl::engine engine, platform::Place cpu_place,
const Tensor* x, const Tensor* weights,
const std::string& uniq_name, const std::string& mode,
bool is_test = false)
@@ -74,7 +74,7 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
auto x_vec_dims = framework::vectorize(x_dims);
-mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
platform::ReorderMKLDNNHandler reorder_handler(x_vec_dims, x->type(),
x_type, onednn_engine);
@@ -197,7 +197,7 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
}
protected:
-static mkldnn::memory::format_tag getPlainFormatTag(const Tensor* tensor) {
+static dnnl::memory::format_tag getPlainFormatTag(const Tensor* tensor) {
auto tensor_dims_size = tensor->dims().size();
PADDLE_ENFORCE_EQ(
tensor_dims_size <= 6 && tensor_dims_size >= 1, true,
@@ -206,17 +206,17 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
switch (tensor_dims_size) {
case 1:
-return mkldnn::memory::format_tag::a;
+return dnnl::memory::format_tag::a;
case 2:
-return mkldnn::memory::format_tag::ab;
+return dnnl::memory::format_tag::ab;
case 3:
-return mkldnn::memory::format_tag::abc;
+return dnnl::memory::format_tag::abc;
case 4:
-return mkldnn::memory::format_tag::abcd;
+return dnnl::memory::format_tag::abcd;
case 5:
-return mkldnn::memory::format_tag::abcde;
+return dnnl::memory::format_tag::abcde;
default:
-return mkldnn::memory::format_tag::abcdef;
+return dnnl::memory::format_tag::abcdef;
}
}
@@ -324,7 +324,7 @@ class ReshapeGradMKLDNNKernel : public ReshapeMKLDNNKernel<T, op_name> {
auto dout_vec_dims = framework::vectorize(dout->dims());
-mkldnn::memory::data_type dout_type =
+dnnl::memory::data_type dout_type =
framework::ToMKLDNNDataType(dout->type());
platform::ReorderMKLDNNHandler reorder_handler(dout_vec_dims, dout->type(),
dout_type, onednn_engine);
@@ -15,24 +15,24 @@ limitations under the License. */
#include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"
-static mkldnn::memory::format_tag get_plain_format_tag(
+static dnnl::memory::format_tag get_plain_format_tag(
const paddle::framework::Tensor* tensor) {
auto tensor_dims_size = tensor->dims().size();
switch (tensor_dims_size) {
case 1:
-return mkldnn::memory::format_tag::a;
+return dnnl::memory::format_tag::a;
case 2:
-return mkldnn::memory::format_tag::ab;
+return dnnl::memory::format_tag::ab;
case 3:
-return mkldnn::memory::format_tag::abc;
+return dnnl::memory::format_tag::abc;
case 4:
-return mkldnn::memory::format_tag::abcd;
+return dnnl::memory::format_tag::abcd;
case 5:
-return mkldnn::memory::format_tag::abcde;
+return dnnl::memory::format_tag::abcde;
}
-return mkldnn::memory::format_tag::abcdef;
+return dnnl::memory::format_tag::abcdef;
}
namespace paddle {
@@ -97,7 +97,7 @@ class SliceMKLDNNKernel : public framework::OpKernel<T> {
out->Resize(framework::make_ddim(slice_dims));
-mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
platform::ReorderMKLDNNHandler reorder_handler(x_vec_dims, x->type(),
x_type, onednn_engine);
@@ -192,11 +192,11 @@ class SliceGradMKLDNNKernel : public framework::OpKernel<T> {
slice_dims[axes[i]] = ends[i] - starts[i];
}
-mkldnn::memory::data_type dout_type =
+dnnl::memory::data_type dout_type =
framework::ToMKLDNNDataType(dout->type());
-mkldnn::memory::desc md(dout_vec_dims, platform::MKLDNNGetDataType<T>(),
-dout->format());
-mkldnn::memory::format_tag reorder_format_tag =
+dnnl::memory::desc md(dout_vec_dims, platform::MKLDNNGetDataType<T>(),
+dout->format());
+dnnl::memory::format_tag reorder_format_tag =
platform::GetMKLDNNFormat(md.reshape(slice_dims));
platform::ReorderMKLDNNHandler reorder_handler(slice_dims, dout->type(),
@@ -25,7 +25,7 @@ class SoftplusMKLDNNHandler
: public platform::MKLDNNHandlerNoCachingT<T, dnnl::binary> {
public:
SoftplusMKLDNNHandler(const framework::ExecutionContext& ctx, const Tensor* x,
-const float beta, const mkldnn::engine engine)
+const float beta, const dnnl::engine engine)
: platform::MKLDNNHandlerNoCachingT<T, dnnl::binary>(engine,
ctx.GetPlace()) {
auto x_tz = framework::vectorize(x->dims());
@@ -53,7 +53,7 @@ class SoftplusMKLDNNHandler
x_md, beta_md, x_md);
}
-std::shared_ptr<mkldnn::memory> AcquireBetaMemory(const float* beta) {
+std::shared_ptr<dnnl::memory> AcquireBetaMemory(const float* beta) {
return this->AcquireMemoryFromPrimitive(
this->fwd_pd_->src1_desc(), platform::to_void_cast<float>(beta));
}
@@ -90,7 +90,7 @@ class SplitMKLDNNKernel : public framework::OpKernel<T> {
auto x_vec_dims = framework::vectorize(x_dims);
-mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
@@ -29,14 +29,14 @@ class TransposeMKLDNNHandler {
public:
TransposeMKLDNNHandler(std::vector<int64_t>& dims, // NOLINT
std::vector<int>& axis, // NOLINT
-mkldnn::engine engine)
+dnnl::engine engine)
: dims_(dims),
axis_(axis),
logical_axis_(dims.size(), 0),
engine_(engine) {}
-std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
-const MKLDNNMemoryFormat& fmt, void* ptr) {
+std::shared_ptr<dnnl::memory> AcquireSrcMemory(const MKLDNNMemoryFormat& fmt,
+void* ptr) {
// Make memory descriptor using input format, unless it
// cannot be trusted (nchw) then make up memory fmt manually
for (size_t i = 0; i < this->logical_axis_.size(); ++i) {
@@ -47,26 +47,26 @@ class TransposeMKLDNNHandler {
? platform::MKLDNNMemDesc(
dims_, platform::MKLDNNGetDataType<T>(), fmt)
: Axis2MemoryDesc(dims_, logical_axis_);
-return std::make_shared<mkldnn::memory>(src_md, engine_, ptr);
+return std::make_shared<dnnl::memory>(src_md, engine_, ptr);
}
-std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output,
-platform::Place place) {
+std::shared_ptr<dnnl::memory> AcquireDstMemory(framework::Tensor* output,
+platform::Place place) {
auto dst_md = Axis2MemoryDesc(dims_, axis_);
auto dst_data = output->mutable_data<T>(place, dst_md.get_size());
-return std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
+return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
}
-std::shared_ptr<mkldnn::reorder> AcquireTranspose(
-std::shared_ptr<mkldnn::memory> dst_memory_p,
-std::shared_ptr<mkldnn::memory> src_memory_p) {
-return std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
+std::shared_ptr<dnnl::reorder> AcquireTranspose(
+std::shared_ptr<dnnl::memory> dst_memory_p,
+std::shared_ptr<dnnl::memory> src_memory_p) {
+return std::make_shared<dnnl::reorder>(*(src_memory_p), *(dst_memory_p));
}
protected:
-mkldnn::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz, // NOLINT
-std::vector<int>& axis // NOLINT
-) {
+dnnl::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz, // NOLINT
+std::vector<int>& axis // NOLINT
+) {
size_t ndims = axis.size();
std::vector<int64_t> strides(ndims);
@@ -75,8 +75,8 @@ class TransposeMKLDNNHandler {
strides[axis[i]] = total_stride;
total_stride *= nchw_tz[axis[i]];
}
-mkldnn::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
-strides);
+dnnl::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
+strides);
return mem_d;
}
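The stride construction in `Axis2MemoryDesc` above is what lets a single reorder perform the transpose: the destination descriptor keeps the source's logical dims but gets permuted strides. A self-contained sketch of the idea, assuming the loop walks axes from last to first (consistent with the accumulation shown) and using made-up dims and axis values:

```cpp
#include <cstdint>
#include <vector>

#include "dnnl.hpp"

int main() {
  dnnl::engine eng(dnnl::engine::kind::cpu, 0);
  dnnl::stream strm(eng);

  // Hypothetical 1x2x3 tensor transposed with axis = {0, 2, 1}.
  std::vector<int64_t> dims = {1, 2, 3};
  std::vector<int> axis = {0, 2, 1};

  // Same accumulation as above: the innermost output axis gets stride 1.
  // For this example, strides ends up as {6, 1, 2}.
  std::vector<int64_t> strides(dims.size());
  int64_t total_stride = 1;
  for (int i = static_cast<int>(axis.size()) - 1; i >= 0; --i) {
    strides[axis[i]] = total_stride;
    total_stride *= dims[axis[i]];
  }

  dnnl::memory::desc src_md(dims, dnnl::memory::data_type::f32,
                            dnnl::memory::format_tag::abc);
  dnnl::memory::desc dst_md(dims, dnnl::memory::data_type::f32, strides);
  dnnl::memory src_mem(src_md, eng);
  dnnl::memory dst_mem(dst_md, eng);

  // A plain reorder now writes src's elements in transposed order.
  dnnl::reorder(src_mem, dst_mem).execute(strm, src_mem, dst_mem);
  strm.wait();
  return 0;
}
```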
@@ -85,7 +85,7 @@ class TransposeMKLDNNHandler {
std::vector<int64_t> dims_;
std::vector<int> axis_;
std::vector<int> logical_axis_;
-mkldnn::engine engine_;
+dnnl::engine engine_;
};
template <typename T>
@@ -72,7 +72,7 @@ class SGDOp : public framework::OperatorWithKernel {
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Param");
#ifdef PADDLE_WITH_MKLDNN
-using mkldnn::memory;
+using dnnl::memory;
if (this->CanMKLDNNBeUsed(ctx, data_type)) {
const auto *param_var = ctx.InputVar("Param");
const auto *grad_var = ctx.InputVar("Grad");
@@ -69,7 +69,7 @@ class ReduceMKLDNNKernel : public framework::OpKernel<T> {
// In that case reorder must be executed to maintain compatibility with
// PaddlePaddle reduce op
if (input_dims == output_dims) {
-mkldnn::memory::data_type input_type =
+dnnl::memory::data_type input_type =
framework::ToMKLDNNDataType(input->type());
platform::ReorderMKLDNNHandler reorder_handler(input_dims, input->type(),
input_type, onednn_engine);
@@ -132,7 +132,7 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
auto* input_dy = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* output_dx = ctx.Output<Tensor>(framework::GradVarName("X"));
-mkldnn::memory::format_tag x_format_tag;
+dnnl::memory::format_tag x_format_tag;
auto input_dims =
CalculateReducedDims(output_dx, input_dy, dims, reduce_all, keep_dim);
auto output_dims = framework::vectorize(output_dx->dims());
@@ -175,7 +175,7 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
}
protected:
-mkldnn::memory::format_tag getPlainFormatTag(const Tensor* tensor) const {
+dnnl::memory::format_tag getPlainFormatTag(const Tensor* tensor) const {
auto tensor_dims_size = tensor->dims().size();
PADDLE_ENFORCE_EQ(
tensor_dims_size <= 5 && tensor_dims_size >= 1, true,
@@ -184,16 +184,16 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
switch (tensor_dims_size) {
case 1:
-return mkldnn::memory::format_tag::a;
+return dnnl::memory::format_tag::a;
case 2:
-return mkldnn::memory::format_tag::ab;
+return dnnl::memory::format_tag::ab;
case 3:
-return mkldnn::memory::format_tag::abc;
+return dnnl::memory::format_tag::abc;
case 4:
-return mkldnn::memory::format_tag::abcd;
+return dnnl::memory::format_tag::abcd;
}
-return mkldnn::memory::format_tag::abcde;
+return dnnl::memory::format_tag::abcde;
}
};
@@ -48,7 +48,6 @@ limitations under the License. */
#ifdef PADDLE_WITH_MKLDNN
#include "dnnl.hpp"
#include "paddle/fluid/framework/data_layout.h"
-namespace mkldnn = dnnl;
#endif
#include <map>
@@ -65,10 +64,6 @@ namespace mkldnn = dnnl;
#endif
#include "unsupported/Eigen/CXX11/Tensor"
-// This aias is required for now so that namespace name changes can be made to
-// less than 20 files at a time. After all the names are changed it will be
-// removed.
namespace Eigen {
struct DefaultDevice;
struct GpuDevice;
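The last hunk above removes the temporary `namespace mkldnn = dnnl;` shim that made these batched renames possible: a C++ namespace alias makes the two spellings name exactly the same entities, so files could be migrated a few at a time without breaking the build. A self-contained illustration (the `engine` stub is hypothetical, standing in for the real oneDNN type):

```cpp
#include <type_traits>

namespace dnnl {
struct engine {};  // stand-in for the real dnnl::engine
}  // namespace dnnl

// The shim Paddle's header carried until this patch:
namespace mkldnn = dnnl;

// Both spellings denote the same type, so either one compiles identically.
static_assert(std::is_same<mkldnn::engine, dnnl::engine>::value,
              "alias and original name the same type");

int main() { return 0; }
```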