From 1ba81500082a456fc60848d83d3d7ed1cdc802cb Mon Sep 17 00:00:00 2001
From: piotrekobiIntel
Date: Mon, 29 Nov 2021 10:59:57 +0100
Subject: [PATCH] Add third batch of deprecated mkldnn namespace name changes
 (#37558)
---
 .../fluid/memory/detail/system_allocator.cc   |  2 +-
 .../fused/mkldnn/fusion_gru_mkldnn_op.cc      |  9 +++--
 paddle/fluid/operators/matmul_op.cc           |  2 +-
 .../fluid/operators/mkldnn/cast_mkldnn_op.cc  |  5 ++-
 .../operators/mkldnn/expand_v2_mkldnn_op.cc   |  8 ++---
 .../mkldnn/gaussian_random_mkldnn_op.cc       |  2 +-
 .../operators/mkldnn/layer_norm_mkldnn_op.cc  |  4 +--
 .../fluid/operators/mkldnn/matmul_mkldnn_op.h |  2 +-
 .../operators/mkldnn/matmul_v2_mkldnn_op.cc   |  6 ++--
 .../fluid/operators/mkldnn/prelu_mkldnn_op.cc |  2 +-
 .../operators/mkldnn/reshape_mkldnn_op.cc     | 18 +++++-----
 .../fluid/operators/mkldnn/slice_mkldnn_op.cc | 24 ++++++-------
 .../operators/mkldnn/softplus_mkldnn_op.h     |  4 +--
 .../fluid/operators/mkldnn/split_mkldnn_op.cc |  2 +-
 .../operators/mkldnn/transpose_mkldnn_op.cc   | 34 +++++++++----------
 paddle/fluid/operators/optimizers/sgd_op.cc   |  2 +-
 .../reduce_ops/mkldnn/reduce_mkldnn_op.h      | 16 ++++-----
 paddle/fluid/platform/device_context.h        |  5 ---
 18 files changed, 70 insertions(+), 77 deletions(-)

diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc
index 54d177ea77..75b93088e5 100644
--- a/paddle/fluid/memory/detail/system_allocator.cc
+++ b/paddle/fluid/memory/detail/system_allocator.cc
@@ -48,7 +48,7 @@ void* AlignedMalloc(size_t size) {
   void* p = nullptr;
   size_t alignment = 32ul;
 #ifdef PADDLE_WITH_MKLDNN
-  // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp
+  // refer to https://github.com/01org/mkl-dnn/blob/master/include/dnnl.hpp
   // memory alignment
   alignment = 4096ul;
 #endif
diff --git a/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
index e1506e3708..6ef49e2cf3 100644
--- a/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
+++ b/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
@@ -30,11 +30,10 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler<T, dnnl::gru_forward> {
  public:
   GRUMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                    const platform::MKLDNNDeviceContext& dev_ctx,
-                   const mkldnn::engine mkldnn_engine,
-                   platform::Place cpu_place, const LoDTensor* input,
-                   const Tensor* weight_h, const Tensor* h0,
-                   const bool is_reverse, const int64_t N, const int64_t Ti,
-                   const int64_t IC, const int64_t OC,
+                   const dnnl::engine mkldnn_engine, platform::Place cpu_place,
+                   const LoDTensor* input, const Tensor* weight_h,
+                   const Tensor* h0, const bool is_reverse, const int64_t N,
+                   const int64_t Ti, const int64_t IC, const int64_t OC,
                    const std::string& unique_name)
       : RNNMKLDNNHandler<T, dnnl::gru_forward>(
             ctx, dev_ctx, mkldnn_engine, ctx.GetPlace(), input, weight_h, h0,
diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc
index 051f97ad4e..29e2cd08ce 100644
--- a/paddle/fluid/operators/matmul_op.cc
+++ b/paddle/fluid/operators/matmul_op.cc
@@ -730,7 +730,7 @@ class MatMulOp : public framework::OperatorWithKernel {
         OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");
 
 #ifdef PADDLE_WITH_MKLDNN
-    using mkldnn::memory;
+    using dnnl::memory;
     if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
       return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                      framework::DataLayout::kMKLDNN,
diff --git a/paddle/fluid/operators/mkldnn/cast_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/cast_mkldnn_op.cc
index 6f79c2dccf..552e91d6ff 100644
--- a/paddle/fluid/operators/mkldnn/cast_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/cast_mkldnn_op.cc
@@ -36,9 +36,8 @@ class CastMKLDNNKernel : public framework::OpKernel<T> {
     auto x_paddle_type = framework::proto::VarType::Type(in_dtype);
     auto out_paddle_type = framework::proto::VarType::Type(out_dtype);
 
-    mkldnn::memory::data_type x_type =
-        framework::ToMKLDNNDataType(x_paddle_type);
-    mkldnn::memory::data_type out_type =
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x_paddle_type);
+    dnnl::memory::data_type out_type =
         framework::ToMKLDNNDataType(out_paddle_type);
 
     auto x_tz = framework::vectorize(x->dims());
diff --git a/paddle/fluid/operators/mkldnn/expand_v2_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/expand_v2_mkldnn_op.cc
index d537c3dbf9..b1be0f0f8f 100644
--- a/paddle/fluid/operators/mkldnn/expand_v2_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/expand_v2_mkldnn_op.cc
@@ -76,9 +76,9 @@ class ExpandMKLDNNKernel : public paddle::framework::OpKernel<T> {
  private:
   dnnl::memory::format_tag GetExtendedFormatTag(
       std::vector<int64_t>& dims, int new_size,
-      mkldnn::memory::format_tag format_tag) const {
-    mkldnn::memory::desc md(dims, paddle::platform::MKLDNNGetDataType<T>(),
-                            format_tag);
+      dnnl::memory::format_tag format_tag) const {
+    dnnl::memory::desc md(dims, paddle::platform::MKLDNNGetDataType<T>(),
+                          format_tag);
     std::vector<int64_t> new_dims(new_size, 1);
     std::copy(dims.begin(), dims.end(),
               new_dims.begin() + new_size - dims.size());
@@ -112,7 +112,7 @@ class ExpandGradMKLDNNKernel : public paddle::framework::OpKernel<T> {
     auto& astream = MKLDNNDeviceContext::tls().get_stream();
 
     if (dout_vec_dims == dx_vec_dims) {
-      mkldnn::memory::data_type dout_type =
+      dnnl::memory::data_type dout_type =
           paddle::framework::ToMKLDNNDataType(dout->type());
       paddle::platform::ReorderMKLDNNHandler reorder_handler(
           dout_vec_dims, dout->type(), dout_type, onednn_engine);
diff --git a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc
index 51fa5ad021..ab92a165c7 100644
--- a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc
@@ -43,7 +43,7 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
     }
 
     tensor->set_layout(DataLayout::kMKLDNN);
-    tensor->set_format(mkldnn::memory::format_tag::oihw);
+    tensor->set_format(dnnl::memory::format_tag::oihw);
   }
 };
 }  // namespace operators
diff --git a/paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc
index 8ab4612ff0..1abd237acb 100644
--- a/paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc
@@ -25,7 +25,7 @@ class LayerNormMKLDNNHandler : public platform::MKLDNNHandlerNoCachingT<
   LayerNormMKLDNNHandler(const std::vector<int64_t>& dims, const float& epsilon,
                          const dnnl::normalization_flags& flags,
                          const bool& is_test, const MKLDNNMemoryFormat fmt,
-                         const mkldnn::engine engine, platform::Place cpu_place)
+                         const dnnl::engine engine, platform::Place cpu_place)
       : platform::MKLDNNHandlerNoCachingT<T, dnnl::layer_normalization_forward>(
             engine, cpu_place) {
     auto md = dnnl::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
@@ -131,7 +131,7 @@ class LayerNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     }
 
     if (with_scaleshift) {
-      std::shared_ptr<mkldnn::memory> scaleshift_memory =
+      std::shared_ptr<dnnl::memory> scaleshift_memory =
           handler.AcquireScaleShiftMemory(scale, bias);
       args.insert({DNNL_ARG_SCALE_SHIFT, *scaleshift_memory});
     }
diff --git a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h
index 69ae78fcca..c82119d06a 100644
--- a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h
+++ b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h
@@ -32,7 +32,7 @@ class MatMulGradMKLDNNKernel : public framework::OpKernel<T> {
  private:
   void ExecuteMatMulGrad(const ExecutionContext& ctx,
                          const MKLDNNDeviceContext& dev_ctx,
-                         const mkldnn::engine& engine, Tensor* x, bool trans_x,
+                         const dnnl::engine& engine, Tensor* x, bool trans_x,
                          bool is_fold_init_dims_x, Tensor* y, bool trans_y,
                          bool is_fold_init_dims_y, Tensor* out) const;
   void RunKernel(const ExecutionContext& ctx) const;
diff --git a/paddle/fluid/operators/mkldnn/matmul_v2_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/matmul_v2_mkldnn_op.cc
index aa0a16944b..0457aeed61 100644
--- a/paddle/fluid/operators/mkldnn/matmul_v2_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/matmul_v2_mkldnn_op.cc
@@ -33,7 +33,7 @@ template <typename T>
 class MatMulV2MKLDNNHandler
     : public paddle::platform::MKLDNNHandlerNoCachingT<T, dnnl::matmul> {
  public:
-  MatMulV2MKLDNNHandler(const mkldnn::engine engine,
+  MatMulV2MKLDNNHandler(const dnnl::engine engine,
                         paddle::platform::Place cpu_place,
                         const std::vector<int64_t>& x_org_dims, bool trans_x,
                         const std::vector<int64_t>& y_org_dims, bool trans_y,
@@ -132,7 +132,7 @@ class MatMulV2MKLDNNKernel
  protected:
   void ExecuteMatMul(const ExecutionContext& ctx,
                      const MKLDNNDeviceContext& dev_ctx,
-                     const mkldnn::engine onednn_engine,
+                     const dnnl::engine onednn_engine,
                      paddle::platform::Place cpu_place, const Tensor* x,
                      std::vector<int64_t>& x_dims, bool trans_x,
                      const Tensor* y, std::vector<int64_t>& y_dims,
@@ -272,7 +272,7 @@ class MatMulV2GradMKLDNNKernel : public MatMulV2MKLDNNKernel<T> {
 
   void ReduceSumForMatmulGradOutput(const ExecutionContext& ctx,
                                     const MKLDNNDeviceContext& dev_ctx,
-                                    const mkldnn::engine onednn_engine,
+                                    const dnnl::engine onednn_engine,
                                     const Tensor* dx_tmp, Tensor* dx,
                                     std::vector<int64_t> dx_dims) const {
     paddle::platform::ReductionMKLDNNHandler<T> handler(
diff --git a/paddle/fluid/operators/mkldnn/prelu_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/prelu_mkldnn_op.cc
index e2a4482666..8296b4739d 100644
--- a/paddle/fluid/operators/mkldnn/prelu_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/prelu_mkldnn_op.cc
@@ -31,7 +31,7 @@ class PReluMKLDNNHandler
                                                dnnl::prelu_backward> {
  public:
   PReluMKLDNNHandler(const MKLDNNDeviceContext& dev_ctx,
-                     const mkldnn::engine engine, platform::Place cpu_place,
+                     const dnnl::engine engine, platform::Place cpu_place,
                      const Tensor* x, const Tensor* weights,
                      const std::string& uniq_name, const std::string& mode,
                      bool is_test = false)
diff --git a/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
index 6c3f4ec062..06142e9553 100644
--- a/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
@@ -74,7 +74,7 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
 
     auto x_vec_dims = framework::vectorize(x_dims);
 
-    mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
     platform::ReorderMKLDNNHandler reorder_handler(x_vec_dims, x->type(),
                                                    x_type, onednn_engine);
 
@@ -197,7 +197,7 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
   }
 
  protected:
-  static mkldnn::memory::format_tag getPlainFormatTag(const Tensor* tensor) {
+  static dnnl::memory::format_tag getPlainFormatTag(const Tensor* tensor) {
     auto tensor_dims_size = tensor->dims().size();
     PADDLE_ENFORCE_EQ(
         tensor_dims_size <= 6 && tensor_dims_size >= 1, true,
@@ -206,17 +206,17 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
 
     switch (tensor_dims_size) {
       case 1:
-        return mkldnn::memory::format_tag::a;
+        return dnnl::memory::format_tag::a;
       case 2:
-        return mkldnn::memory::format_tag::ab;
+        return dnnl::memory::format_tag::ab;
       case 3:
-        return mkldnn::memory::format_tag::abc;
+        return dnnl::memory::format_tag::abc;
       case 4:
-        return mkldnn::memory::format_tag::abcd;
+        return dnnl::memory::format_tag::abcd;
       case 5:
-        return mkldnn::memory::format_tag::abcde;
+        return dnnl::memory::format_tag::abcde;
       default:
-        return mkldnn::memory::format_tag::abcdef;
+        return dnnl::memory::format_tag::abcdef;
     }
   }
 
@@ -324,7 +324,7 @@ class ReshapeGradMKLDNNKernel : public ReshapeMKLDNNKernel<T> {
 
     auto dout_vec_dims = framework::vectorize(dout->dims());
 
-    mkldnn::memory::data_type dout_type =
+    dnnl::memory::data_type dout_type =
         framework::ToMKLDNNDataType(dout->type());
     platform::ReorderMKLDNNHandler reorder_handler(dout_vec_dims, dout->type(),
                                                    dout_type, onednn_engine);
diff --git a/paddle/fluid/operators/mkldnn/slice_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/slice_mkldnn_op.cc
index 6bc3413604..d9bd843a9d 100644
--- a/paddle/fluid/operators/mkldnn/slice_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/slice_mkldnn_op.cc
@@ -15,24 +15,24 @@ limitations under the License. */
 #include "paddle/fluid/operators/utils.h"
 #include "paddle/fluid/platform/mkldnn_reuse.h"
 
-static mkldnn::memory::format_tag get_plain_format_tag(
+static dnnl::memory::format_tag get_plain_format_tag(
     const paddle::framework::Tensor* tensor) {
   auto tensor_dims_size = tensor->dims().size();
 
   switch (tensor_dims_size) {
     case 1:
-      return mkldnn::memory::format_tag::a;
+      return dnnl::memory::format_tag::a;
     case 2:
-      return mkldnn::memory::format_tag::ab;
+      return dnnl::memory::format_tag::ab;
     case 3:
-      return mkldnn::memory::format_tag::abc;
+      return dnnl::memory::format_tag::abc;
     case 4:
-      return mkldnn::memory::format_tag::abcd;
+      return dnnl::memory::format_tag::abcd;
     case 5:
-      return mkldnn::memory::format_tag::abcde;
+      return dnnl::memory::format_tag::abcde;
   }
 
-  return mkldnn::memory::format_tag::abcdef;
+  return dnnl::memory::format_tag::abcdef;
 }
 
 namespace paddle {
@@ -97,7 +97,7 @@ class SliceMKLDNNKernel : public framework::OpKernel<T> {
 
     out->Resize(framework::make_ddim(slice_dims));
 
-    mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
     platform::ReorderMKLDNNHandler reorder_handler(x_vec_dims, x->type(),
                                                    x_type, onednn_engine);
 
@@ -192,11 +192,11 @@ class SliceGradMKLDNNKernel : public framework::OpKernel<T> {
       slice_dims[axes[i]] = ends[i] - starts[i];
     }
 
-    mkldnn::memory::data_type dout_type =
+    dnnl::memory::data_type dout_type =
         framework::ToMKLDNNDataType(dout->type());
-    mkldnn::memory::desc md(dout_vec_dims, platform::MKLDNNGetDataType<T>(),
-                            dout->format());
-    mkldnn::memory::format_tag reorder_format_tag =
+    dnnl::memory::desc md(dout_vec_dims, platform::MKLDNNGetDataType<T>(),
+                          dout->format());
+    dnnl::memory::format_tag reorder_format_tag =
         platform::GetMKLDNNFormat(md.reshape(slice_dims));
 
     platform::ReorderMKLDNNHandler reorder_handler(slice_dims, dout->type(),
diff --git a/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h b/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h
index c8c539a956..053a1d0f7d 100644
--- a/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h
+++ b/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h
@@ -25,7 +25,7 @@ class SoftplusMKLDNNHandler
     : public platform::MKLDNNHandlerNoCachingT<T, dnnl::binary> {
  public:
   SoftplusMKLDNNHandler(const framework::ExecutionContext& ctx, const Tensor* x,
-                        const float beta, const mkldnn::engine engine)
+                        const float beta, const dnnl::engine engine)
       : platform::MKLDNNHandlerNoCachingT<T, dnnl::binary>(engine,
                                                            ctx.GetPlace()) {
     auto x_tz = framework::vectorize(x->dims());
@@ -53,7 +53,7 @@ class SoftplusMKLDNNHandler
                                     x_md, beta_md, x_md);
   }
 
-  std::shared_ptr<mkldnn::memory> AcquireBetaMemory(const float* beta) {
+  std::shared_ptr<dnnl::memory> AcquireBetaMemory(const float* beta) {
     return this->AcquireMemoryFromPrimitive(
         this->fwd_pd_->src1_desc(), platform::to_void_cast<float>(beta));
   }
diff --git a/paddle/fluid/operators/mkldnn/split_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/split_mkldnn_op.cc
index 411f33276c..a8e1e6e8a0 100644
--- a/paddle/fluid/operators/mkldnn/split_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/split_mkldnn_op.cc
@@ -90,7 +90,7 @@ class SplitMKLDNNKernel : public framework::OpKernel<T> {
 
     auto x_vec_dims = framework::vectorize(x_dims);
 
-    mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
 
     auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
 
diff --git a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
index a92e8e6cb0..f8698cdbb6 100644
--- a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
@@ -29,14 +29,14 @@ class TransposeMKLDNNHandler {
  public:
   TransposeMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                          std::vector<int>& axis,      // NOLINT
-                         mkldnn::engine engine)
+                         dnnl::engine engine)
       : dims_(dims),
         axis_(axis),
         logical_axis_(dims.size(), 0),
         engine_(engine) {}
 
-  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
-      const MKLDNNMemoryFormat& fmt, void* ptr) {
+  std::shared_ptr<dnnl::memory> AcquireSrcMemory(const MKLDNNMemoryFormat& fmt,
+                                                 void* ptr) {
     // Make memory descriptor using input format, unless it
     // cannot be trusted (nchw) then make up memory fmt manually
     for (size_t i = 0; i < this->logical_axis_.size(); ++i) {
@@ -47,26 +47,26 @@ class TransposeMKLDNNHandler {
               ? platform::MKLDNNMemDesc(
                     dims_, platform::MKLDNNGetDataType<T>(), fmt)
               : Axis2MemoryDesc(dims_, logical_axis_);
-    return std::make_shared<mkldnn::memory>(src_md, engine_, ptr);
+    return std::make_shared<dnnl::memory>(src_md, engine_, ptr);
   }
 
-  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output,
-                                                   platform::Place place) {
+  std::shared_ptr<dnnl::memory> AcquireDstMemory(framework::Tensor* output,
+                                                 platform::Place place) {
     auto dst_md = Axis2MemoryDesc(dims_, axis_);
     auto dst_data = output->mutable_data<T>(place, dst_md.get_size());
-    return std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
+    return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
   }
 
-  std::shared_ptr<mkldnn::reorder> AcquireTranspose(
-      std::shared_ptr<mkldnn::memory> dst_memory_p,
-      std::shared_ptr<mkldnn::memory> src_memory_p) {
-    return std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
+  std::shared_ptr<dnnl::reorder> AcquireTranspose(
+      std::shared_ptr<dnnl::memory> dst_memory_p,
+      std::shared_ptr<dnnl::memory> src_memory_p) {
+    return std::make_shared<dnnl::reorder>(*(src_memory_p), *(dst_memory_p));
   }
 
  protected:
-  mkldnn::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
-                                       std::vector<int>& axis         // NOLINT
-                                       ) {
+  dnnl::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
+                                     std::vector<int>& axis         // NOLINT
+                                     ) {
     size_t ndims = axis.size();
     std::vector<int64_t> strides(ndims);
@@ -75,8 +75,8 @@ class TransposeMKLDNNHandler {
       strides[axis[i]] = total_stride;
       total_stride *= nchw_tz[axis[i]];
     }
-    mkldnn::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
-                               strides);
+    dnnl::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
+                             strides);
     return mem_d;
   }
 
@@ -85,7 +85,7 @@ class TransposeMKLDNNHandler {
   std::vector<int64_t> dims_;
   std::vector<int> axis_;
   std::vector<int> logical_axis_;
-  mkldnn::engine engine_;
+  dnnl::engine engine_;
 };
 
 template <typename T>
diff --git a/paddle/fluid/operators/optimizers/sgd_op.cc b/paddle/fluid/operators/optimizers/sgd_op.cc
index b2e258f815..e7c09430e9 100644
--- a/paddle/fluid/operators/optimizers/sgd_op.cc
+++ b/paddle/fluid/operators/optimizers/sgd_op.cc
@@ -72,7 +72,7 @@ class SGDOp : public framework::OperatorWithKernel {
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Param");
 
 #ifdef PADDLE_WITH_MKLDNN
-    using mkldnn::memory;
+    using dnnl::memory;
     if (this->CanMKLDNNBeUsed(ctx, data_type)) {
       const auto *param_var = ctx.InputVar("Param");
       const auto *grad_var = ctx.InputVar("Grad");
diff --git a/paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h b/paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h
index 17801454da..ad7defd22d 100644
--- a/paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h
+++ b/paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h
@@ -69,7 +69,7 @@ class ReduceMKLDNNKernel : public framework::OpKernel<T> {
     // In that case reorder must be executed to maintain compatibility with
     // PaddlePaddle reduce op
     if (input_dims == output_dims) {
-      mkldnn::memory::data_type input_type =
+      dnnl::memory::data_type input_type =
          framework::ToMKLDNNDataType(input->type());
       platform::ReorderMKLDNNHandler reorder_handler(input_dims, input->type(),
                                                      input_type, onednn_engine);
@@ -132,7 +132,7 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
     auto* input_dy = ctx.Input<Tensor>(framework::GradVarName("Out"));
     auto* output_dx = ctx.Output<Tensor>(framework::GradVarName("X"));
 
-    mkldnn::memory::format_tag x_format_tag;
+    dnnl::memory::format_tag x_format_tag;
     auto input_dims =
         CalculateReducedDims(output_dx, input_dy, dims, reduce_all, keep_dim);
     auto output_dims = framework::vectorize(output_dx->dims());
@@ -175,7 +175,7 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
   }
 
  protected:
-  mkldnn::memory::format_tag getPlainFormatTag(const Tensor* tensor) const {
+  dnnl::memory::format_tag getPlainFormatTag(const Tensor* tensor) const {
     auto tensor_dims_size = tensor->dims().size();
     PADDLE_ENFORCE_EQ(
         tensor_dims_size <= 5 && tensor_dims_size >= 1, true,
@@ -184,16 +184,16 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
 
     switch (tensor_dims_size) {
       case 1:
-        return mkldnn::memory::format_tag::a;
+        return dnnl::memory::format_tag::a;
      case 2:
-        return mkldnn::memory::format_tag::ab;
+        return dnnl::memory::format_tag::ab;
       case 3:
-        return mkldnn::memory::format_tag::abc;
+        return dnnl::memory::format_tag::abc;
       case 4:
-        return mkldnn::memory::format_tag::abcd;
+        return dnnl::memory::format_tag::abcd;
     }
 
-    return mkldnn::memory::format_tag::abcde;
+    return dnnl::memory::format_tag::abcde;
   }
 };
diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h
index e15b62413a..7323299451 100644
--- a/paddle/fluid/platform/device_context.h
+++ b/paddle/fluid/platform/device_context.h
@@ -48,7 +48,6 @@ limitations under the License. */
 #ifdef PADDLE_WITH_MKLDNN
 #include "dnnl.hpp"
 #include "paddle/fluid/framework/data_layout.h"
-namespace mkldnn = dnnl;
 #endif
 
 #include
@@ -65,10 +64,6 @@ namespace mkldnn = dnnl;
 #endif
 #include "unsupported/Eigen/CXX11/Tensor"
 
-// This aias is required for now so that namespace name changes can be made to
-// less than 20 files at a time. After all the names are changed it will be
-// removed.
-
 namespace Eigen {
 struct DefaultDevice;
 struct GpuDevice;
-- 
GitLab
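
Note: the device_context.h hunk above deletes the transitional `namespace mkldnn = dnnl;` alias. A namespace alias makes both qualified spellings name the same entities, which is what let the rename land in batches of files instead of all at once. Below is a minimal, self-contained sketch of that mechanism; the `engine` and `memory` structs are hypothetical stand-ins for illustration, not the real oneDNN headers.

#include <type_traits>

// Stand-in for the dnnl namespace normally provided by "dnnl.hpp"
// (illustration only; real oneDNN types have richer interfaces).
namespace dnnl {
struct engine {};
struct memory {
  explicit memory(engine e) : eng(e) {}
  engine eng;
};
}  // namespace dnnl

// The transitional alias this patch removes: while it exists, mkldnn:: and
// dnnl:: are two spellings of one namespace.
namespace mkldnn = dnnl;

int main() {
  dnnl::engine eng;
  mkldnn::memory legacy(eng);  // not-yet-migrated file: old spelling compiles
  dnnl::memory renamed(eng);   // migrated file: new spelling, same type
  static_assert(std::is_same<mkldnn::memory, dnnl::memory>::value,
                "the alias makes both spellings denote the same type");
  return 0;
}

Once every file spells the namespace `dnnl::`, the alias has no remaining users and can be deleted, which is exactly what the final hunk of this patch does.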