diff --git a/paddle/fluid/framework/data_layout_transform.cc b/paddle/fluid/framework/data_layout_transform.cc
index 16f9241f55acab637d8503ba63ff7b2446627c15..dc7ef19f1361cf2c4ee2431bea7723d3d1487694 100644
--- a/paddle/fluid/framework/data_layout_transform.cc
+++ b/paddle/fluid/framework/data_layout_transform.cc
@@ -147,8 +147,8 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout,
   auto* dev_ctx = dynamic_cast<platform::MKLDNNDeviceContext*>(pool.Get(place));
   auto& cpu_engine = dev_ctx->GetEngine();
 
-  std::vector<int> in_tz = paddle::framework::vectorize2int(in.dims());
-  std::vector<int> out_tz = in_tz;
+  auto in_tz = paddle::framework::vectorize<int>(in.dims());
+  auto out_tz = in_tz;
 
   memory::data_type in_type = ToMKLDNNDataType(in.type());
   PADDLE_ENFORCE(in_type != memory::data_type::data_undef,
diff --git a/paddle/fluid/framework/ddim.cc b/paddle/fluid/framework/ddim.cc
index d4e469247f24b3091f94df1abb365b4933f3dcc7..39f9ef04aa69fefc53c178d8a1121de9d88dc901 100644
--- a/paddle/fluid/framework/ddim.cc
+++ b/paddle/fluid/framework/ddim.cc
@@ -48,13 +48,6 @@ bool DDim::operator==(const DDim& d) const {
 
 bool DDim::operator!=(const DDim& d) const { return !(*this == d); }
 
-std::vector<int64_t> vectorize(const DDim& ddim) {
-  std::vector<int64_t> result(DDim::kMaxRank);
-  dynamic_dim_assign(ddim.Get(), result.data(), ddim.size());
-  result.resize(ddim.size());
-  return result;
-}
-
 // NOTE: framework::vectorize converts to type int64_t
 // which does not fit cudnn inputs.
 std::vector<int> vectorize2int(const DDim& ddim) {
diff --git a/paddle/fluid/framework/ddim.h b/paddle/fluid/framework/ddim.h
index bfe3e55a73da42b0865d4e02b59ec6be90ea0096..62a9ad36dbfc7fe40259332551adfd2d596fccf7 100644
--- a/paddle/fluid/framework/ddim.h
+++ b/paddle/fluid/framework/ddim.h
@@ -170,7 +170,13 @@ DDim make_ddim(const std::vector<int64_t>& dims);
  */
 DDim make_ddim(std::initializer_list<int64_t> dims);
 
-std::vector<int64_t> vectorize(const DDim& ddim);
+template <typename T = int64_t>
+std::vector<T> vectorize(const DDim& ddim) {
+  std::vector<T> result(DDim::kMaxRank);
+  dynamic_dim_assign(ddim.Get(), result.data(), ddim.size());
+  result.resize(ddim.size());
+  return result;
+}
 std::vector<int> vectorize2int(const DDim& ddim);
 
 int64_t product(const DDim& ddim);
diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index 8fbed7aac781b6c73a380721050d2df0f79c7377..a36e3605270dae7dfb7039891d5bbc410edefd2a 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -816,7 +816,7 @@ void CompileTimeInferShapeContext::SetRepeatedDims(
   auto var = block_.FindVarRecursive(name);
   PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s", name);
   std::vector<std::vector<int64_t>> dim_vec(dims.size());
-  std::transform(dims.begin(), dims.end(), dim_vec.begin(), vectorize);
+  std::transform(dims.begin(), dims.end(), dim_vec.begin(), vectorize<>);
   var->SetShapes(dim_vec);
 }
 
diff --git a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
index 0714cb86ffd4fa0131f008e92752f5144f14e1b0..dec64ba08e96830b4839eb85e282965c8908e4be 100644
--- a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
@@ -97,7 +97,7 @@ void eltwise_forward(const framework::ExecutionContext &ctx,
       x->dims().size() == 2 || x->dims().size() == 3 || x->dims().size() == 4,
      "Input dim must be with 2, 3 or 4");
 
-  std::vector<int> src_tz = framework::vectorize2int(x->dims());
+  auto src_tz = framework::vectorize<int>(x->dims());
 
   auto src_format =
       src_tz.size() == 2 ? MKLDNNMemoryFormat::nc : x->format();
@@ -149,7 +149,7 @@ void eltwise_grad(const framework::ExecutionContext &ctx,
   const T alpha = ctx.op().HasAttr("alpha") ? ctx.Attr<T>("alpha") : 0;
   const T beta = ctx.op().HasAttr("beta") ? ctx.Attr<T>("beta") : 0;
 
-  std::vector<int> diff_dst_tz = framework::vectorize2int(diff_y->dims());
+  auto diff_dst_tz = framework::vectorize<int>(diff_y->dims());
 
   // diff_dst and src dims should be the same
   auto src_format =
diff --git a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
index 92f3a2c82161b56b53eb3717bd6aa3cc7cb01b8e..d2a27d81ba9bb188f430911d93a0bceb3374da12 100644
--- a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
@@ -214,8 +214,8 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
                             ? mkldnn::prop_kind::forward_scoring
                             : mkldnn::prop_kind::forward_training;
 
-    auto src_tz = paddle::framework::vectorize2int(x->dims());
-    auto scale_tz = paddle::framework::vectorize2int(scale->dims());
+    auto src_tz = paddle::framework::vectorize<int>(x->dims());
+    auto scale_tz = paddle::framework::vectorize<int>(scale->dims());
     PADDLE_ENFORCE(scale_tz.size() == 1, "Dims of scale tensor is NOT 1");
     const unsigned int ic = scale_tz[0];
 
@@ -349,11 +349,11 @@ class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     T *diff_scale_data = diff_scale->mutable_data<T>(ctx.GetPlace());
     T *diff_shift_data = diff_shift->mutable_data<T>(ctx.GetPlace());
 
-    auto src_tz = paddle::framework::vectorize2int(x->dims());
+    auto src_tz = paddle::framework::vectorize<int>(x->dims());
     auto diff_src_tz = src_tz;
     auto dst_tz = src_tz;
     auto diff_dst_tz = dst_tz;
-    auto scale_tz = paddle::framework::vectorize2int(scale->dims());
+    auto scale_tz = paddle::framework::vectorize<int>(scale->dims());
     PADDLE_ENFORCE(scale_tz.size() == 1, "Dims of scale tensor is NOT 1");
     const unsigned int ic = scale_tz[0];
 
diff --git a/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc
index 03555dbadcbaf06405e8c9165b01f87dabf8c6e5..5c75afdc7465caabaef087e54e359a68c601bc1e 100644
--- a/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc
@@ -40,7 +40,7 @@ static void EnforceLayouts(const std::vector<const Tensor*> inputs) {
 static memory::primitive_desc CreateMemPrimDesc(const Tensor& input,
                                                 const mkldnn::engine& engine,
                                                 const memory::data_type& dt) {
-  const auto dims = paddle::framework::vectorize2int(input.dims());
+  const auto dims = paddle::framework::vectorize<int>(input.dims());
   const auto format = input.format();
   auto description = memory::desc(dims, dt, format);
   auto mem_prim_desc = memory::primitive_desc(description, engine);
@@ -73,7 +73,7 @@ std::string CreateKey(const paddle::framework::ExecutionContext& ctx,
   key.reserve(platform::MKLDNNHandler::MaxKeyLength);
   for (size_t i = 0; i < multi_input.size(); i++) {
     platform::MKLDNNHandler::AppendKeyDims(
-        &key, paddle::framework::vectorize2int(multi_input[i]->dims()));
+        &key, paddle::framework::vectorize<int>(multi_input[i]->dims()));
   }
   platform::MKLDNNHandler::AppendKey(&key, std::to_string(concat_axis));
   platform::MKLDNNHandler::AppendKey(&key, ctx.op().Output("Out"));
@@ -124,7 +124,7 @@ class ConcatPrimitiveFactory {
  private:
   memory::desc CreateDstMemDescriptor(Tensor* output,
                                       const memory::data_type& dt) {
-    auto dst_dims = paddle::framework::vectorize2int(output->dims());
+    auto dst_dims = paddle::framework::vectorize<int>(output->dims());
     return memory::desc(dst_dims, dt, MKLDNNMemoryFormat::any);
   }
 
diff --git a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
index 9c3ffe45a5a36ff13db0e67d4a1be13a07e4b7a4..2b426bcdd522978b7b033b04d4d21ffad5ee22c4 100644
--- a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
@@ -183,12 +183,11 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     const T* input_data = input->data<T>();
     const T* filter_data = filter->data<T>();
 
-    std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
-    std::vector<int> weights_tz =
-        paddle::framework::vectorize2int(filter->dims());
+    auto src_tz = paddle::framework::vectorize<int>(input->dims());
+    auto weights_tz = paddle::framework::vectorize<int>(filter->dims());
     int g = std::max(groups, 1);
     GetWeightsTz(weights_tz, g, is_conv3d);
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
+    auto dst_tz = paddle::framework::vectorize<int>(output->dims());
 
     // Get unique name for storing MKLDNN primitives
     const std::string key = platform::ConvMKLDNNHandler::GetHash(
@@ -238,7 +237,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto fwd_prop_kind = is_test ? mkldnn::prop_kind::forward_inference
                                  : mkldnn::prop_kind::forward_training;
     if (bias) {
-      bias_tz = paddle::framework::vectorize2int(bias->dims());
+      bias_tz = paddle::framework::vectorize<int>(bias->dims());
       auto bias_md = platform::MKLDNNMemDesc(
           bias_tz, platform::MKLDNNGetDataType<T>(), MKLDNNMemoryFormat::x);
       conv_pd = handler.AcquireConvolutionPrimitiveDescriptor(
@@ -281,7 +280,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
         auto output_data =
             output->mutable_data<T>(ctx.GetPlace(), handler.GetDstMemorySize());
         auto residual_data_tz =
-            paddle::framework::vectorize2int(residual_param->dims());
+            paddle::framework::vectorize<int>(residual_param->dims());
         auto residual_data_type =
             paddle::framework::ToMKLDNNDataType(residual_param->type());
 
@@ -405,13 +404,12 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
 
     const T* input_data = input->data<T>();
 
-    std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
-    std::vector<int> weights_tz =
-        paddle::framework::vectorize2int(filter->dims());
+    auto src_tz = paddle::framework::vectorize<int>(input->dims());
+    auto weights_tz = paddle::framework::vectorize<int>(filter->dims());
     int g = std::max(groups, 1);
     GetWeightsTz(weights_tz, g, is_conv3d);
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
+    auto dst_tz = paddle::framework::vectorize<int>(output->dims());
 
     mkldnn::memory::data_type src_dt =
         paddle::framework::ToMKLDNNDataType(input->type());
 
@@ -514,7 +512,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
                                : mkldnn::prop_kind::forward_training;
 
     if (bias) {
-      bias_tz = paddle::framework::vectorize2int(bias->dims());
+      bias_tz = paddle::framework::vectorize<int>(bias->dims());
       auto bias_md = platform::MKLDNNMemDesc(bias_tz, memory::data_type::s32,
                                              MKLDNNMemoryFormat::x);
       conv_pd = handler->AcquireConvolutionPrimitiveDescriptor(
@@ -554,7 +552,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
           paddle::framework::ToMKLDNNDataType(residual_param->type());
       if (residual_param->format() != handler->GetDstFormat()) {
         auto residual_data_tz =
-            paddle::framework::vectorize2int(residual_param->dims());
+            paddle::framework::vectorize<int>(residual_param->dims());
         auto user_residual_md = platform::MKLDNNMemDesc(
             residual_data_tz, residual_dt, residual_param->format());
         dst_memory_p = platform::SetDstMemory<T>(
@@ -705,13 +703,11 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     T* input_grad_data = nullptr;
     T* filter_grad_data = nullptr;
 
-    std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
-    std::vector<int> weights_tz =
-        paddle::framework::vectorize2int(filter->dims());
+    auto src_tz = paddle::framework::vectorize<int>(input->dims());
+    auto weights_tz = paddle::framework::vectorize<int>(filter->dims());
     int g = std::max(groups, 1);
     GetWeightsTz(weights_tz, g, is_conv3d);
-    std::vector<int> dst_tz =
-        paddle::framework::vectorize2int(output_grad->dims());
+    auto dst_tz = paddle::framework::vectorize<int>(output_grad->dims());
 
     auto src_format = input->format();
     MKLDNNMemoryFormat weights_format =
         GetWeightsFormat(filter->format(), g, is_conv3d);
diff --git a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
index e7758ba19b7b93793bbf7d1acca29ebeaa636db5..4efa4afc6dca5091a61ab09d214f5363dcd6e0f8 100644
--- a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
@@ -82,10 +82,10 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     const T* input_data = input->data<T>();
     const T* filter_data = filter->data<T>();
 
-    std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
-    std::vector<int> iohw_weights_tz =
-        paddle::framework::vectorize2int(filter->dims());
-    std::vector<int> weights_tz = iohw_weights_tz;
+    auto src_tz = paddle::framework::vectorize<int>(input->dims());
+    auto iohw_weights_tz = paddle::framework::vectorize<int>(filter->dims());
+    auto weights_tz = iohw_weights_tz;
+    // IOHW -> OIHW
     weights_tz[0] = iohw_weights_tz[1];
     weights_tz[1] = iohw_weights_tz[0];
 
@@ -124,7 +124,7 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       weights_tz[3] = h;
       weights_tz[4] = w;
     }
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
+    auto dst_tz = paddle::framework::vectorize<int>(output->dims());
 
     // Get unique name for storing MKLDNN primitives
     const std::string key = platform::ConvTransposeMKLDNNHandler::GetHash(
@@ -166,7 +166,7 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto fwd_prop_kind = is_test ? mkldnn::prop_kind::forward_inference
                                  : mkldnn::prop_kind::forward_training;
     if (bias) {
-      bias_tz = paddle::framework::vectorize2int(bias->dims());
+      bias_tz = paddle::framework::vectorize<int>(bias->dims());
       auto bias_md = platform::MKLDNNMemDesc(
           bias_tz, platform::MKLDNNGetDataType<T>(), MKLDNNMemoryFormat::x);
       conv_transpose_pd = handler.AcquireConvolutionPrimitiveDescriptor(
diff --git a/paddle/fluid/operators/mkldnn/dequantize_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/dequantize_mkldnn_op.cc
index 336bd5644623c47673f8ae000997bb0287048eae..575103e3d34c5d93fc3c8b0512f3d294ac21b71f 100644
--- a/paddle/fluid/operators/mkldnn/dequantize_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/dequantize_mkldnn_op.cc
@@ -59,8 +59,8 @@ class DeQuantOpKernel : public framework::OpKernel<T> {
     std::vector<float> reorder_scale = {1.0f / scale_data};
 
     std::vector<primitive> pipeline;
-    std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
+    auto src_tz = paddle::framework::vectorize<int>(input->dims());
+    auto dst_tz = paddle::framework::vectorize<int>(output->dims());
     mkldnn::memory::data_type src_dt =
         paddle::framework::ToMKLDNNDataType(input->type());
     MKLDNNMemoryFormat src_fmt = input->format();
diff --git a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
index 8f720f3268e2f2a2aa678325d6b8c11d137d960a..30790425cb34562e92ee578530150636de23816a 100644
--- a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
@@ -109,7 +109,7 @@ class FCPrimitiveFactory {
 
   static mkldnn::memory::desc CreateMemDescriptor(const Tensor* tensor,
                                                   MKLDNNMemoryFormat format) {
-    auto dims = framework::vectorize2int(tensor->dims());
+    auto dims = framework::vectorize<int>(tensor->dims());
     return CreateMemDescriptor(dims, format);
   }
 
@@ -124,7 +124,7 @@ class FCPrimitiveFactory {
   }
 
   mkldnn::memory TransposeWeights(const Tensor* weights) {
-    auto dims = framework::vectorize2int(weights->dims());
+    auto dims = framework::vectorize<int>(weights->dims());
     std::swap(dims[0], dims[1]);  // Correct output dimensions
     auto src_desc = CreateMemDescriptor(dims, MKLDNNMemoryFormat::io);
     auto dst_desc = CreateMemDescriptor(dims, MKLDNNMemoryFormat::oi);
@@ -182,8 +182,8 @@ class FCPrimitiveFactory {
 
   mkldnn::memory CreateFourDimWeightsMemory(const Tensor* input,
                                             const Tensor* weights) {
-    auto input_dims = framework::vectorize2int(input->dims());
-    auto weight_dims = framework::vectorize2int(weights->dims());
+    auto input_dims = framework::vectorize<int>(input->dims());
+    auto weight_dims = framework::vectorize<int>(weights->dims());
     auto dims = {weight_dims[1], input_dims[1], input_dims[2], input_dims[3]};
 
     auto dst_format = MatchWeightFormat(input->format());
diff --git a/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
index 97ffb385a0e87f82d04d1e3b8e27b38959476d12..263a3393d2017bb9f629f92a2bc3afbf82a2f263 100644
--- a/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
@@ -56,7 +56,7 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto e_mid = framework::EigenTensor<T, 4>::From(*mid);
     e_mid = e_mid.constant(k);
 
-    auto dims = paddle::framework::vectorize2int(x->dims());
+    auto dims = paddle::framework::vectorize<int>(x->dims());
 
     // Format and dims are assumed to be the same for dst and src
     auto md = paddle::platform::MKLDNNMemDesc(
@@ -119,7 +119,7 @@ class LRNMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     auto x_grad_data = x_grad->mutable_data<T>(ctx.GetPlace());
     auto out_grad_data = out_grad->data<T>();
 
-    auto dims = paddle::framework::vectorize2int(x->dims());
+    auto dims = paddle::framework::vectorize<int>(x->dims());
 
     const std::string key = platform::LRNMKLDNNHandler::GetHash(
         dims, n, alpha, beta, k, x->format(), ctx.op().Input("Out"));
diff --git a/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
index f20c66be0fe413b4dca49028e3f0b9014f9e7a88..f1936b01f2d5ec7c49438b42a25167f43b06a70c 100644
--- a/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
@@ -116,7 +116,7 @@ class MulPrimitiveFactory {
   memory::desc CreateMemDescriptor(
       const Tensor *tensor, MKLDNNMemoryFormat format,
       memory::data_type type = platform::MKLDNNGetDataType<T>()) {
-    auto dims = framework::vectorize2int(tensor->dims());
+    auto dims = framework::vectorize<int>(tensor->dims());
     return platform::MKLDNNMemDesc(dims, type, format);
   }
 
@@ -156,7 +156,7 @@ class MulPrimitiveFactory {
   }
 
   memory TransposeInputY(const Tensor *input_y) {
-    auto dims = framework::vectorize2int(input_y->dims());
+    auto dims = framework::vectorize<int>(input_y->dims());
     std::swap(dims[0], dims[1]);  // Correct output dimensions
     auto src_desc = CreateMemDescriptor(dims, MKLDNNMemoryFormat::io);
     auto dst_desc = CreateMemDescriptor(dims, MKLDNNMemoryFormat::oi);
diff --git a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
index 6fbbe8fee22313ecf6a7caaf90b11910ea52a246..58a59a88eed8235ee99582935d347552018c86b9 100644
--- a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
@@ -69,8 +69,8 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     const T* input_data = input->data<T>();
     T* output_data = output->mutable_data<T>(ctx.GetPlace());
 
-    std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
+    auto src_tz = paddle::framework::vectorize<int>(input->dims());
+    auto dst_tz = paddle::framework::vectorize<int>(output->dims());
 
     auto input_format = input->format();
     MKLDNNMemoryFormat output_format{MKLDNNMemoryFormat::format_undef};
@@ -166,10 +166,8 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     T* in_x_grad_data = in_x_grad->mutable_data<T>(ctx.GetPlace());
     MKLDNNMemoryFormat in_x_grad_format{MKLDNNMemoryFormat::format_undef};
 
-    std::vector<int> diff_src_tz =
-        paddle::framework::vectorize2int(in_x_grad->dims());
-    std::vector<int> diff_dst_tz =
-        paddle::framework::vectorize2int(out_grad->dims());
+    auto diff_src_tz = paddle::framework::vectorize<int>(in_x_grad->dims());
+    auto diff_dst_tz = paddle::framework::vectorize<int>(out_grad->dims());
 
     // Get an unique name from "argument" name of "Out" variable
     // This name will be used as key when referring info from device context
diff --git a/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc
index e0e7829cd86fbc1736e393cf489a9b5192a03983..6d1bf50befa67e4925fd329442594e923d8b8a84 100644
--- a/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc
@@ -54,8 +54,8 @@ class QuantOpKernel : public framework::OpKernel<T> {
     const auto& engine = dev_ctx.GetEngine();
 
     std::vector<primitive> pipeline;
-    std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
+    auto src_tz = paddle::framework::vectorize<int>(input->dims());
+    auto dst_tz = paddle::framework::vectorize<int>(output->dims());
 
     const T* input_data = input->data<T>();
 
diff --git a/paddle/fluid/operators/mkldnn/requantize_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/requantize_mkldnn_op.cc
index ece0c3250c93f72caa8a0fd947332a74d8bd646f..a5e1e5041fb1e4503cd1aa36b8785be3148a7e75 100644
--- a/paddle/fluid/operators/mkldnn/requantize_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/requantize_mkldnn_op.cc
@@ -43,8 +43,8 @@ class ReQuantOpKernel : public framework::OpKernel<T> {
     const auto& engine = dev_ctx.GetEngine();
 
     std::vector<primitive> pipeline;
-    std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
+    auto src_tz = paddle::framework::vectorize<int>(input->dims());
+    auto dst_tz = paddle::framework::vectorize<int>(output->dims());
     mkldnn::memory::data_type src_dt =
         paddle::framework::ToMKLDNNDataType(input->type());
     mkldnn::memory::data_type dst_dt = src_dt;
diff --git a/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc
index 51669f305325a12506051d398826e439d91abf21..5e49a08a90848423d62ebfb12eb2ac663f201ccd 100644
--- a/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc
@@ -199,8 +199,8 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
     const T* input_data = flattened_input.data<T>();
     T* output_data = flattened_output.mutable_data<T>(ctx.GetPlace());
 
-    std::vector<int> src_tz = paddle::framework::vectorize2int(flattened_dims);
-    std::vector<int> dst_tz = src_tz;
+    auto src_tz = paddle::framework::vectorize<int>(flattened_dims);
+    auto dst_tz = src_tz;
     // Same memory descriptor to be used for input and output
     memory::dims softmax_tz = {src_tz[0], src_tz[1]};
     // Generate keys for storing/retriving primitives for this operator
@@ -268,8 +268,8 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
     const T* diff_dst_ptr = flattened_dout.template data<T>();
     T* diff_src_ptr = flattened_dx.template mutable_data<T>(ctx.GetPlace());
 
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(flattened_dims);
-    std::vector<int> src_tz(dst_tz);
+    auto dst_tz = paddle::framework::vectorize<int>(flattened_dims);
+    auto src_tz(dst_tz);
     // Same memory descriptor to be used for input and output
     memory::dims softmax_tz = {src_tz[0], src_tz[1]};
 
diff --git a/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc
index fb1477284cf7f33367b08ee506af312fc07212a1..1a8e9d6911dc4756f8c7d3338d58c6893caa47bd 100644
--- a/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc
@@ -63,7 +63,7 @@ class SumMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     LoDTensor* output = ctx.Output<LoDTensor>("Out");
     T* output_data = output->mutable_data<T>(ctx.GetPlace());
 
-    std::vector<int> dst_tz = framework::vectorize2int(output->dims());
+    auto dst_tz = framework::vectorize<int>(output->dims());
     auto src_tz = dst_tz;
     MKLDNNMemoryFormat output_format{MKLDNNMemoryFormat::format_undef};
     std::vector<float> scales;
diff --git a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
index 1bcf0a3ce612b62e2474d8dffbe45b99a36fd190..4a79d590c253d2e30ea93217798d59055a2c3baf 100644
--- a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
@@ -43,7 +43,7 @@ class TransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       return;
     }
 
-    std::vector<int> nchw_tz = paddle::framework::vectorize2int(input->dims());
+    auto nchw_tz = paddle::framework::vectorize<int>(input->dims());
 
     const std::string key = platform::TransposeMKLDNNHandler::GetHash(
         nchw_tz, axis,
@@ -97,8 +97,7 @@ class TransposeMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     const T* out_grad_data = out_grad->data<T>();
     x_grad->mutable_data<T>(ctx.GetPlace());
 
-    std::vector<int> nchw_tz =
-        paddle::framework::vectorize2int(out_grad->dims());
+    auto nchw_tz = paddle::framework::vectorize<int>(out_grad->dims());
 
     const std::string key = platform::TransposeMKLDNNHandler::GetHash(
         nchw_tz, axis, ctx.op().Output(framework::GradVarName("X")));
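
Usage sketch (not part of the patch): with the templated vectorize from the ddim.h hunk above, the default template argument keeps callers on int64_t, while the MKL-DNN kernels touched here ask for vectorize<int>(...) explicitly because this codebase passes MKL-DNN dimensions around as std::vector<int>. The Example() function and the shape values below are hypothetical, for illustration only; they simply exercise the three call forms that appear in the diff.

#include <cstdint>
#include <vector>

#include "paddle/fluid/framework/ddim.h"

namespace fw = paddle::framework;

void Example() {
  // Hypothetical NCHW shape, used only to illustrate the call sites.
  fw::DDim dims = fw::make_ddim({8, 3, 224, 224});

  // Default template argument: element type is int64_t.
  std::vector<int64_t> shape64 = fw::vectorize(dims);

  // Explicit <int>: the form used by the MKL-DNN kernels in this diff, which
  // feed the result into std::vector<int>-based memory descriptors.
  std::vector<int> shape32 = fw::vectorize<int>(dims);

  // vectorize<> (empty angle brackets) also selects the default int64_t
  // instantiation; this is the form passed to std::transform in op_desc.cc.
  std::vector<int64_t> same_as_default = fw::vectorize<>(dims);
}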