diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc
index 54d177ea77781b0a40bf0c21fbaa2e42df686ad8..75b93088e55028be833ad4fd99df3dff0ee3cb23
--- a/paddle/fluid/memory/detail/system_allocator.cc
+++ b/paddle/fluid/memory/detail/system_allocator.cc
@@ -48,7 +48,7 @@ void* AlignedMalloc(size_t size) {
   void* p = nullptr;
   size_t alignment = 32ul;
 #ifdef PADDLE_WITH_MKLDNN
-  // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp
+  // refer to https://github.com/01org/mkl-dnn/blob/master/include/dnnl.hpp
   // memory alignment
   alignment = 4096ul;
 #endif
diff --git a/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc b/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
index e1506e37083666366ee47773cdb850fa983bedaa..6ef49e2cf3db7318f2eb8f0f55ffccd0e3bbad15
--- a/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
+++ b/paddle/fluid/operators/fused/mkldnn/fusion_gru_mkldnn_op.cc
@@ -30,11 +30,10 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler {
  public:
   GRUMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                    const platform::MKLDNNDeviceContext& dev_ctx,
-                   const mkldnn::engine mkldnn_engine,
-                   platform::Place cpu_place, const LoDTensor* input,
-                   const Tensor* weight_h, const Tensor* h0,
-                   const bool is_reverse, const int64_t N, const int64_t Ti,
-                   const int64_t IC, const int64_t OC,
+                   const dnnl::engine mkldnn_engine, platform::Place cpu_place,
+                   const LoDTensor* input, const Tensor* weight_h,
+                   const Tensor* h0, const bool is_reverse, const int64_t N,
+                   const int64_t Ti, const int64_t IC, const int64_t OC,
                    const std::string& unique_name)
       : RNNMKLDNNHandler(
             ctx, dev_ctx, mkldnn_engine, ctx.GetPlace(), input, weight_h, h0,
diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc
index 051f97ad4ec8de8a56407e13c7221e6f0e4d1046..29e2cd08ce9fedf8e730dc6f713a87cf6efe57a3
--- a/paddle/fluid/operators/matmul_op.cc
+++ b/paddle/fluid/operators/matmul_op.cc
@@ -730,7 +730,7 @@ class MatMulOp : public framework::OperatorWithKernel {
         OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");
 
 #ifdef PADDLE_WITH_MKLDNN
-    using mkldnn::memory;
+    using dnnl::memory;
     if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
       return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                      framework::DataLayout::kMKLDNN,
diff --git a/paddle/fluid/operators/mkldnn/cast_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/cast_mkldnn_op.cc
index 6f79c2dccf56b2683cdb961cb287afb095454509..552e91d6ff82604a03bea9a1a60e6905b03de4e2
--- a/paddle/fluid/operators/mkldnn/cast_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/cast_mkldnn_op.cc
@@ -36,9 +36,8 @@ class CastMKLDNNKernel : public framework::OpKernel<T> {
     auto x_paddle_type = framework::proto::VarType::Type(in_dtype);
     auto out_paddle_type = framework::proto::VarType::Type(out_dtype);
 
-    mkldnn::memory::data_type x_type =
-        framework::ToMKLDNNDataType(x_paddle_type);
-    mkldnn::memory::data_type out_type =
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x_paddle_type);
+    dnnl::memory::data_type out_type =
         framework::ToMKLDNNDataType(out_paddle_type);
 
     auto x_tz = framework::vectorize(x->dims());
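
Every hunk in this series applies the same mechanical rename: each `mkldnn::` qualifier becomes `dnnl::`, with no behavioral change, because `dnnl.hpp` declares the whole oneDNN API in namespace `dnnl` and `mkldnn` survives only as a transitional alias (deleted in the final hunk of this patch). A minimal standalone sketch of the before/after spelling; the shapes and engine index are illustrative, not taken from any kernel here:

```cpp
// Minimal sketch of the rename, assuming a CPU build with dnnl.hpp available.
#include "dnnl.hpp"

int main() {
  dnnl::engine eng(dnnl::engine::kind::cpu, 0);   // was: mkldnn::engine
  dnnl::memory::desc md({2, 3, 4, 5},             // was: mkldnn::memory::desc
                        dnnl::memory::data_type::f32,
                        dnnl::memory::format_tag::nchw);
  dnnl::memory mem(md, eng);                      // was: mkldnn::memory
  return 0;
}
```
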
diff --git a/paddle/fluid/operators/mkldnn/expand_v2_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/expand_v2_mkldnn_op.cc
index d537c3dbf9fddfc3f64d17ad2d39d5534a928108..b1be0f0f8fb441cead7c9138268fb6d058fc7496
--- a/paddle/fluid/operators/mkldnn/expand_v2_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/expand_v2_mkldnn_op.cc
@@ -76,9 +76,9 @@ class ExpandMKLDNNKernel : public paddle::framework::OpKernel<T> {
  private:
   dnnl::memory::format_tag GetExtendedFormatTag(
       std::vector<int64_t>& dims, int new_size,
-      mkldnn::memory::format_tag format_tag) const {
-    mkldnn::memory::desc md(dims, paddle::platform::MKLDNNGetDataType<T>(),
-                            format_tag);
+      dnnl::memory::format_tag format_tag) const {
+    dnnl::memory::desc md(dims, paddle::platform::MKLDNNGetDataType<T>(),
+                          format_tag);
     std::vector<int64_t> new_dims(new_size, 1);
     std::copy(dims.begin(), dims.end(),
               new_dims.begin() + new_size - dims.size());
@@ -112,7 +112,7 @@ class ExpandGradMKLDNNKernel : public paddle::framework::OpKernel<T> {
     auto& astream = MKLDNNDeviceContext::tls().get_stream();
 
     if (dout_vec_dims == dx_vec_dims) {
-      mkldnn::memory::data_type dout_type =
+      dnnl::memory::data_type dout_type =
           paddle::framework::ToMKLDNNDataType(dout->type());
       paddle::platform::ReorderMKLDNNHandler reorder_handler(
           dout_vec_dims, dout->type(), dout_type, onednn_engine);
diff --git a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc
index 51fa5ad021a2b284cd75f297d83326b2102c1e41..ab92a165c76d124e27c2635863846e52815c3d61
--- a/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/gaussian_random_mkldnn_op.cc
@@ -43,7 +43,7 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
     }
 
     tensor->set_layout(DataLayout::kMKLDNN);
-    tensor->set_format(mkldnn::memory::format_tag::oihw);
+    tensor->set_format(dnnl::memory::format_tag::oihw);
   }
 };
 }  // namespace operators
diff --git a/paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc
index 8ab4612ff04b504b31fddaf245ce4127e5edc8ae..1abd237acbc91d5ec3e725dcf7197f19db6595d0
--- a/paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/layer_norm_mkldnn_op.cc
@@ -25,7 +25,7 @@ class LayerNormMKLDNNHandler : public platform::MKLDNNHandlerNoCachingT<
   LayerNormMKLDNNHandler(const std::vector<int64_t>& dims, const float& epsilon,
                          const dnnl::normalization_flags& flags,
                          const bool& is_test, const MKLDNNMemoryFormat fmt,
-                         const mkldnn::engine engine, platform::Place cpu_place)
+                         const dnnl::engine engine, platform::Place cpu_place)
       : platform::MKLDNNHandlerNoCachingT<T, dnnl::layer_normalization_forward>(
             engine, cpu_place) {
     auto md = dnnl::memory::desc(dims, platform::MKLDNNGetDataType<T>(), fmt);
@@ -131,7 +131,7 @@ class LayerNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     }
 
     if (with_scaleshift) {
-      std::shared_ptr<mkldnn::memory> scaleshift_memory =
+      std::shared_ptr<dnnl::memory> scaleshift_memory =
           handler.AcquireScaleShiftMemory(scale, bias);
       args.insert({DNNL_ARG_SCALE_SHIFT, *scaleshift_memory});
     }
diff --git a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h
index 69ae78fcca08a09607fa8231bd0a9fb9c7f02cc8..c82119d06a01ea40760d1c345f36e3d50c38b74e
--- a/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h
+++ b/paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h
@@ -32,7 +32,7 @@ class MatMulGradMKLDNNKernel : public framework::OpKernel<T> {
  private:
   void ExecuteMatMulGrad(const ExecutionContext& ctx,
                          const MKLDNNDeviceContext& dev_ctx,
-                         const mkldnn::engine& engine, Tensor* x, bool trans_x,
+                         const dnnl::engine& engine, Tensor* x, bool trans_x,
                          bool is_fold_init_dims_x, Tensor* y, bool trans_y,
                          bool is_fold_init_dims_y, Tensor* out) const;
   void RunKernel(const ExecutionContext& ctx) const;
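
matmul_mkldnn_op.h above only changes the `dnnl::engine` in a declaration; the matmul_v2 file below is where the handler actually builds a `dnnl::matmul` primitive. For orientation, a bare-bones sketch of that primitive in the oneDNN 2.x-era API this codebase targets; the shapes and variable names are illustrative, not Paddle's:

```cpp
// Hypothetical standalone example of dnnl::matmul (oneDNN 2.x style API);
// the handler classes in this patch wrap essentially this call sequence.
#include <unordered_map>
#include "dnnl.hpp"

int main() {
  dnnl::engine eng(dnnl::engine::kind::cpu, 0);
  dnnl::stream s(eng);
  using dt = dnnl::memory::data_type;
  using tag = dnnl::memory::format_tag;

  dnnl::memory::desc src_md({2, 3}, dt::f32, tag::ab);  // A: 2x3
  dnnl::memory::desc wei_md({3, 4}, dt::f32, tag::ab);  // B: 3x4
  dnnl::memory::desc dst_md({2, 4}, dt::f32, tag::ab);  // C = A*B: 2x4

  dnnl::matmul::desc mm_d(src_md, wei_md, dst_md);
  dnnl::matmul::primitive_desc mm_pd(mm_d, eng);
  dnnl::matmul mm(mm_pd);

  dnnl::memory src(src_md, eng), wei(wei_md, eng), dst(dst_md, eng);
  mm.execute(s, {{DNNL_ARG_SRC, src},
                 {DNNL_ARG_WEIGHTS, wei},
                 {DNNL_ARG_DST, dst}});
  s.wait();  // buffers are uninitialized; the point is only the call shape
  return 0;
}
```
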
diff --git a/paddle/fluid/operators/mkldnn/matmul_v2_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/matmul_v2_mkldnn_op.cc
index aa0a16944bcfabafa3a8184e7bc44c2c5bb9af20..0457aeed616fa33a8ac05d696ff7327f63138ce9
--- a/paddle/fluid/operators/mkldnn/matmul_v2_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/matmul_v2_mkldnn_op.cc
@@ -33,7 +33,7 @@ template <typename T>
 class MatMulV2MKLDNNHandler
     : public paddle::platform::MKLDNNHandlerNoCachingT<T, dnnl::matmul> {
  public:
-  MatMulV2MKLDNNHandler(const mkldnn::engine engine,
+  MatMulV2MKLDNNHandler(const dnnl::engine engine,
                         paddle::platform::Place cpu_place,
                         const std::vector<int64_t>& x_org_dims, bool trans_x,
                         const std::vector<int64_t>& y_org_dims, bool trans_y,
@@ -132,7 +132,7 @@ class MatMulV2MKLDNNKernel
  protected:
   void ExecuteMatMul(const ExecutionContext& ctx,
                      const MKLDNNDeviceContext& dev_ctx,
-                     const mkldnn::engine onednn_engine,
+                     const dnnl::engine onednn_engine,
                      paddle::platform::Place cpu_place, const Tensor* x,
                      std::vector<int64_t>& x_dims, bool trans_x,
                      const Tensor* y, std::vector<int64_t>& y_dims,
@@ -272,7 +272,7 @@ class MatMulV2GradMKLDNNKernel : public MatMulV2MKLDNNKernel<T> {
 
   void ReduceSumForMatmulGradOutput(const ExecutionContext& ctx,
                                     const MKLDNNDeviceContext& dev_ctx,
-                                    const mkldnn::engine onednn_engine,
+                                    const dnnl::engine onednn_engine,
                                     const Tensor* dx_tmp, Tensor* dx,
                                     std::vector<int64_t> dx_dims) const {
     paddle::platform::ReductionMKLDNNHandler<T> handler(
diff --git a/paddle/fluid/operators/mkldnn/prelu_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/prelu_mkldnn_op.cc
index e2a4482666a1ace818777e9e7e3abaa1e6ff2f22..8296b4739d9541db6e7c0da995c8743fecbec22e
--- a/paddle/fluid/operators/mkldnn/prelu_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/prelu_mkldnn_op.cc
@@ -31,7 +31,7 @@ class PReluMKLDNNHandler
                                       dnnl::prelu_backward> {
  public:
   PReluMKLDNNHandler(const MKLDNNDeviceContext& dev_ctx,
-                     const mkldnn::engine engine, platform::Place cpu_place,
+                     const dnnl::engine engine, platform::Place cpu_place,
                      const Tensor* x, const Tensor* weights,
                      const std::string& uniq_name, const std::string& mode,
                      bool is_test = false)
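
Several of the remaining kernels (reshape and slice below, reduce further down) carry a small helper that maps a tensor's rank to oneDNN's "plain" format tag, i.e. the dense row-major layout named by the first N letters of `abcdef`. A hedged standalone sketch of that mapping; `plain_format_tag_for_rank` is a made-up name, not Paddle's:

```cpp
// Sketch of the rank -> plain-format-tag mapping used by the kernels below.
#include "dnnl.hpp"

dnnl::memory::format_tag plain_format_tag_for_rank(int rank) {
  // Tag "ab" names a dense 2D row-major layout, "abc" dense 3D, and so on,
  // so the plain tag for rank N is simply the first N letters of "abcdef".
  switch (rank) {
    case 1: return dnnl::memory::format_tag::a;
    case 2: return dnnl::memory::format_tag::ab;
    case 3: return dnnl::memory::format_tag::abc;
    case 4: return dnnl::memory::format_tag::abcd;
    case 5: return dnnl::memory::format_tag::abcde;
    default: return dnnl::memory::format_tag::abcdef;  // capped at rank 6
  }
}
```
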
diff --git a/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
index 6c3f4ec06201a115d50074a2d9c5fd9aa63743fa..06142e95532c523edc68ea229de792f3fb4dbada
--- a/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/reshape_mkldnn_op.cc
@@ -74,7 +74,7 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
 
     auto x_vec_dims = framework::vectorize(x_dims);
 
-    mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
     platform::ReorderMKLDNNHandler reorder_handler(x_vec_dims, x->type(),
                                                    x_type, onednn_engine);
 
@@ -197,7 +197,7 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
   }
 
  protected:
-  static mkldnn::memory::format_tag getPlainFormatTag(const Tensor* tensor) {
+  static dnnl::memory::format_tag getPlainFormatTag(const Tensor* tensor) {
     auto tensor_dims_size = tensor->dims().size();
     PADDLE_ENFORCE_EQ(
         tensor_dims_size <= 6 && tensor_dims_size >= 1, true,
@@ -206,17 +206,17 @@ class ReshapeMKLDNNKernel : public framework::OpKernel<T> {
 
     switch (tensor_dims_size) {
       case 1:
-        return mkldnn::memory::format_tag::a;
+        return dnnl::memory::format_tag::a;
       case 2:
-        return mkldnn::memory::format_tag::ab;
+        return dnnl::memory::format_tag::ab;
       case 3:
-        return mkldnn::memory::format_tag::abc;
+        return dnnl::memory::format_tag::abc;
       case 4:
-        return mkldnn::memory::format_tag::abcd;
+        return dnnl::memory::format_tag::abcd;
       case 5:
-        return mkldnn::memory::format_tag::abcde;
+        return dnnl::memory::format_tag::abcde;
       default:
-        return mkldnn::memory::format_tag::abcdef;
+        return dnnl::memory::format_tag::abcdef;
     }
   }
@@ -324,7 +324,7 @@ class ReshapeGradMKLDNNKernel : public ReshapeMKLDNNKernel<T> {
 
     auto dout_vec_dims = framework::vectorize(dout->dims());
 
-    mkldnn::memory::data_type dout_type =
+    dnnl::memory::data_type dout_type =
         framework::ToMKLDNNDataType(dout->type());
     platform::ReorderMKLDNNHandler reorder_handler(dout_vec_dims, dout->type(),
                                                    dout_type, onednn_engine);
diff --git a/paddle/fluid/operators/mkldnn/slice_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/slice_mkldnn_op.cc
index 6bc3413604e221b3a9af16d131e4858fc0746510..d9bd843a9d0cf07ea23074c7605849cc147734ef
--- a/paddle/fluid/operators/mkldnn/slice_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/slice_mkldnn_op.cc
@@ -15,24 +15,24 @@ limitations under the License. */
 #include "paddle/fluid/operators/utils.h"
 #include "paddle/fluid/platform/mkldnn_reuse.h"
 
-static mkldnn::memory::format_tag get_plain_format_tag(
+static dnnl::memory::format_tag get_plain_format_tag(
     const paddle::framework::Tensor* tensor) {
   auto tensor_dims_size = tensor->dims().size();
 
   switch (tensor_dims_size) {
     case 1:
-      return mkldnn::memory::format_tag::a;
+      return dnnl::memory::format_tag::a;
     case 2:
-      return mkldnn::memory::format_tag::ab;
+      return dnnl::memory::format_tag::ab;
     case 3:
-      return mkldnn::memory::format_tag::abc;
+      return dnnl::memory::format_tag::abc;
     case 4:
-      return mkldnn::memory::format_tag::abcd;
+      return dnnl::memory::format_tag::abcd;
     case 5:
-      return mkldnn::memory::format_tag::abcde;
+      return dnnl::memory::format_tag::abcde;
   }
 
-  return mkldnn::memory::format_tag::abcdef;
+  return dnnl::memory::format_tag::abcdef;
 }
 
 namespace paddle {
@@ -97,7 +97,7 @@ class SliceMKLDNNKernel : public framework::OpKernel<T> {
 
     out->Resize(framework::make_ddim(slice_dims));
 
-    mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
     platform::ReorderMKLDNNHandler reorder_handler(x_vec_dims, x->type(),
                                                    x_type, onednn_engine);
 
@@ -192,11 +192,11 @@ class SliceGradMKLDNNKernel : public framework::OpKernel<T> {
       slice_dims[axes[i]] = ends[i] - starts[i];
     }
 
-    mkldnn::memory::data_type dout_type =
+    dnnl::memory::data_type dout_type =
         framework::ToMKLDNNDataType(dout->type());
-    mkldnn::memory::desc md(dout_vec_dims, platform::MKLDNNGetDataType<T>(),
-                            dout->format());
-    mkldnn::memory::format_tag reorder_format_tag =
+    dnnl::memory::desc md(dout_vec_dims, platform::MKLDNNGetDataType<T>(),
+                          dout->format());
+    dnnl::memory::format_tag reorder_format_tag =
         platform::GetMKLDNNFormat(md.reshape(slice_dims));
 
     platform::ReorderMKLDNNHandler reorder_handler(slice_dims, dout->type(),
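
Nearly every kernel touched so far funnels into `platform::ReorderMKLDNNHandler`, which wraps oneDNN's reorder primitive: a copy that can also convert between layouts and data types. A bare sketch of the underlying call sequence, with illustrative dims and tags:

```cpp
// Sketch (assumptions: CPU engine, f32, illustrative 4D shape) of the raw
// dnnl::reorder that ReorderMKLDNNHandler wraps in the kernels above.
#include "dnnl.hpp"

int main() {
  dnnl::engine eng(dnnl::engine::kind::cpu, 0);
  dnnl::stream s(eng);
  using dt = dnnl::memory::data_type;
  using tag = dnnl::memory::format_tag;

  dnnl::memory src({{2, 3, 4, 5}, dt::f32, tag::nchw}, eng);
  dnnl::memory dst({{2, 3, 4, 5}, dt::f32, tag::nhwc}, eng);

  dnnl::reorder(src, dst).execute(s, src, dst);  // copy + layout conversion
  s.wait();
  return 0;
}
```
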
diff --git a/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h b/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h
index c8c539a956505892b5b000681c58ebf298e05ce0..053a1d0f7da4985edc102620c57b4885673e3abf
--- a/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h
+++ b/paddle/fluid/operators/mkldnn/softplus_mkldnn_op.h
@@ -25,7 +25,7 @@ class SoftplusMKLDNNHandler
     : public platform::MKLDNNHandlerNoCachingT<T, dnnl::binary> {
  public:
   SoftplusMKLDNNHandler(const framework::ExecutionContext& ctx, const Tensor* x,
-                        const float beta, const mkldnn::engine engine)
+                        const float beta, const dnnl::engine engine)
       : platform::MKLDNNHandlerNoCachingT<T, dnnl::binary>(engine,
                                                            ctx.GetPlace()) {
     auto x_tz = framework::vectorize(x->dims());
@@ -53,7 +53,7 @@ class SoftplusMKLDNNHandler
                                     x_md, beta_md, x_md);
   }
 
-  std::shared_ptr<mkldnn::memory> AcquireBetaMemory(const float* beta) {
+  std::shared_ptr<dnnl::memory> AcquireBetaMemory(const float* beta) {
     return this->AcquireMemoryFromPrimitive(
         this->fwd_pd_->src1_desc(), platform::to_void_cast<float>(beta));
   }
diff --git a/paddle/fluid/operators/mkldnn/split_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/split_mkldnn_op.cc
index 411f33276c361c24bbf89dae7365bd9953ceed9d..a8e1e6e8a0e4c3a084290cf7fb47fe2e742bc13c
--- a/paddle/fluid/operators/mkldnn/split_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/split_mkldnn_op.cc
@@ -90,7 +90,7 @@ class SplitMKLDNNKernel : public framework::OpKernel<T> {
 
     auto x_vec_dims = framework::vectorize(x_dims);
 
-    mkldnn::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
+    dnnl::memory::data_type x_type = framework::ToMKLDNNDataType(x->type());
 
     auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
 
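
The transpose file below contains the one genuinely interesting trick in this batch: `Axis2MemoryDesc` encodes the permutation entirely in strides, so a single reorder into the resulting descriptor materializes the transpose. A hedged sketch of the idea outside Paddle's types; `transposed_desc` is a made-up name and f32 is assumed:

```cpp
// Sketch of the strides-as-permutation idea behind Axis2MemoryDesc below.
#include <vector>
#include "dnnl.hpp"

dnnl::memory::desc transposed_desc(const std::vector<int64_t>& dims,
                                   const std::vector<int>& axis) {
  std::vector<int64_t> strides(dims.size());
  int64_t total_stride = 1;
  // Walk the permutation backwards so the last permuted dim is unit-stride.
  for (int i = static_cast<int>(axis.size()) - 1; i >= 0; --i) {
    strides[axis[i]] = total_stride;
    total_stride *= dims[axis[i]];
  }
  // Same logical dims, permuted physical layout: a reorder into this desc
  // writes the data in transposed order.
  return dnnl::memory::desc(dims, dnnl::memory::data_type::f32, strides);
}
```
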
diff --git a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
index a92e8e6cb047f9e9b533bdbd045f8ee29c9ff329..f8698cdbb64075651d702764fe66d5f81410d25e
--- a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
@@ -29,14 +29,14 @@ class TransposeMKLDNNHandler {
  public:
   TransposeMKLDNNHandler(std::vector<int64_t>& dims,  // NOLINT
                          std::vector<int>& axis,      // NOLINT
-                         mkldnn::engine engine)
+                         dnnl::engine engine)
       : dims_(dims),
         axis_(axis),
         logical_axis_(dims.size(), 0),
         engine_(engine) {}
 
-  std::shared_ptr<mkldnn::memory> AcquireSrcMemory(
-      const MKLDNNMemoryFormat& fmt, void* ptr) {
+  std::shared_ptr<dnnl::memory> AcquireSrcMemory(const MKLDNNMemoryFormat& fmt,
+                                                 void* ptr) {
     // Make memory descriptor using input format, unless it
     // cannot be trusted (nchw) then make up memory fmt manually
     for (size_t i = 0; i < this->logical_axis_.size(); ++i) {
@@ -47,26 +47,26 @@ class TransposeMKLDNNHandler {
                       ? platform::MKLDNNMemDesc(
                             dims_, platform::MKLDNNGetDataType<T>(), fmt)
                       : Axis2MemoryDesc(dims_, logical_axis_);
-    return std::make_shared<mkldnn::memory>(src_md, engine_, ptr);
+    return std::make_shared<dnnl::memory>(src_md, engine_, ptr);
   }
 
-  std::shared_ptr<mkldnn::memory> AcquireDstMemory(framework::Tensor* output,
-                                                   platform::Place place) {
+  std::shared_ptr<dnnl::memory> AcquireDstMemory(framework::Tensor* output,
+                                                 platform::Place place) {
     auto dst_md = Axis2MemoryDesc(dims_, axis_);
     auto dst_data = output->mutable_data<T>(place, dst_md.get_size());
-    return std::make_shared<mkldnn::memory>(dst_md, engine_, dst_data);
+    return std::make_shared<dnnl::memory>(dst_md, engine_, dst_data);
   }
 
-  std::shared_ptr<mkldnn::reorder> AcquireTranspose(
-      std::shared_ptr<mkldnn::memory> dst_memory_p,
-      std::shared_ptr<mkldnn::memory> src_memory_p) {
-    return std::make_shared<mkldnn::reorder>(*(src_memory_p), *(dst_memory_p));
+  std::shared_ptr<dnnl::reorder> AcquireTranspose(
+      std::shared_ptr<dnnl::memory> dst_memory_p,
+      std::shared_ptr<dnnl::memory> src_memory_p) {
+    return std::make_shared<dnnl::reorder>(*(src_memory_p), *(dst_memory_p));
   }
 
  protected:
-  mkldnn::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
-                                       std::vector<int>& axis          // NOLINT
-                                       ) {
+  dnnl::memory::desc Axis2MemoryDesc(std::vector<int64_t>& nchw_tz,  // NOLINT
+                                     std::vector<int>& axis          // NOLINT
+                                     ) {
     size_t ndims = axis.size();
     std::vector<int64_t> strides(ndims);
 
@@ -75,8 +75,8 @@ class TransposeMKLDNNHandler {
       strides[axis[i]] = total_stride;
       total_stride *= nchw_tz[axis[i]];
     }
-    mkldnn::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
-                               strides);
+    dnnl::memory::desc mem_d(nchw_tz, platform::MKLDNNGetDataType<T>(),
+                             strides);
     return mem_d;
   }
 
@@ -85,7 +85,7 @@ class TransposeMKLDNNHandler {
   std::vector<int64_t> dims_;
   std::vector<int> axis_;
   std::vector<int> logical_axis_;
-  mkldnn::engine engine_;
+  dnnl::engine engine_;
 };
 
 template <typename T>
diff --git a/paddle/fluid/operators/optimizers/sgd_op.cc b/paddle/fluid/operators/optimizers/sgd_op.cc
index b2e258f815d7256501de462d15b2ac2668fd6dc3..e7c09430e9157632ab643bd791102af035392540
--- a/paddle/fluid/operators/optimizers/sgd_op.cc
+++ b/paddle/fluid/operators/optimizers/sgd_op.cc
@@ -72,7 +72,7 @@ class SGDOp : public framework::OperatorWithKernel {
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Param");
 
 #ifdef PADDLE_WITH_MKLDNN
-    using mkldnn::memory;
+    using dnnl::memory;
    if (this->CanMKLDNNBeUsed(ctx, data_type)) {
      const auto *param_var = ctx.InputVar("Param");
      const auto *grad_var = ctx.InputVar("Grad");
diff --git a/paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h b/paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h
index 17801454da2fa2cbbea9b9ddce1eeb1e6263cf6c..ad7defd22d05631f23e19a5621191824a96ee583
--- a/paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h
+++ b/paddle/fluid/operators/reduce_ops/mkldnn/reduce_mkldnn_op.h
@@ -69,7 +69,7 @@ class ReduceMKLDNNKernel : public framework::OpKernel<T> {
     // In that case reorder must be executed to maintain compatibility with
     // PaddlePaddle reduce op
     if (input_dims == output_dims) {
-      mkldnn::memory::data_type input_type =
+      dnnl::memory::data_type input_type =
           framework::ToMKLDNNDataType(input->type());
       platform::ReorderMKLDNNHandler reorder_handler(input_dims, input->type(),
                                                      input_type, onednn_engine);
@@ -132,7 +132,7 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
     auto* input_dy = ctx.Input<Tensor>(framework::GradVarName("Out"));
     auto* output_dx = ctx.Output<Tensor>(framework::GradVarName("X"));
 
-    mkldnn::memory::format_tag x_format_tag;
+    dnnl::memory::format_tag x_format_tag;
     auto input_dims =
         CalculateReducedDims(output_dx, input_dy, dims, reduce_all, keep_dim);
     auto output_dims = framework::vectorize(output_dx->dims());
@@ -175,7 +175,7 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
   }
 
  protected:
-  mkldnn::memory::format_tag getPlainFormatTag(const Tensor* tensor) const {
+  dnnl::memory::format_tag getPlainFormatTag(const Tensor* tensor) const {
     auto tensor_dims_size = tensor->dims().size();
     PADDLE_ENFORCE_EQ(
         tensor_dims_size <= 5 && tensor_dims_size >= 1, true,
@@ -184,16 +184,16 @@ class ReduceGradMKLDNNKernel : public framework::OpKernel<T> {
 
     switch (tensor_dims_size) {
       case 1:
-        return mkldnn::memory::format_tag::a;
+        return dnnl::memory::format_tag::a;
       case 2:
-        return mkldnn::memory::format_tag::ab;
+        return dnnl::memory::format_tag::ab;
       case 3:
-        return mkldnn::memory::format_tag::abc;
+        return dnnl::memory::format_tag::abc;
      case 4:
-        return mkldnn::memory::format_tag::abcd;
+        return dnnl::memory::format_tag::abcd;
     }
-    return mkldnn::memory::format_tag::abcde;
+    return dnnl::memory::format_tag::abcde;
   }
 };
diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h
index e15b62413a16fecfe307d3a2ed16c382db682bf0..73232994516b61c28dc7d00be541a64d19ec55e8
--- a/paddle/fluid/platform/device_context.h
+++ b/paddle/fluid/platform/device_context.h
@@ -48,7 +48,6 @@ limitations under the License. */
 #ifdef PADDLE_WITH_MKLDNN
 #include "dnnl.hpp"
 #include "paddle/fluid/framework/data_layout.h"
-namespace mkldnn = dnnl;
 #endif
 
 #include
@@ -65,10 +64,6 @@ namespace mkldnn = dnnl;
 #endif
 
 #include "unsupported/Eigen/CXX11/Tensor"
-// This aias is required for now so that namespace name changes can be made to
-// less than 20 files at a time. After all the names are changed it will be
-// removed.
-
 namespace Eigen {
 struct DefaultDevice;
 struct GpuDevice;
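
The device_context.h hunk is the payoff of the whole series: the transitional alias that let `mkldnn::` and `dnnl::` coexist is deleted, so any remaining `mkldnn::` spelling now fails to compile. Reconstructed from the removed `-` lines above, the bridge worked like this; the snippet is a sketch, not the original header:

```cpp
// What the removed alias did: one line made every mkldnn:: spelling a
// synonym for dnnl::, so the rename could land in batches of under 20 files.
#include <type_traits>
#include "dnnl.hpp"

namespace mkldnn = dnnl;  // the alias this patch finally removes

static_assert(std::is_same<mkldnn::memory, dnnl::memory>::value,
              "with the alias present, both spellings name the same type");
```
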