diff --git a/paddle/fluid/operators/controlflow/while_op.cc b/paddle/fluid/operators/controlflow/while_op.cc
index 88ccbb51b4ee7140621714a177a6689d96e97bef..cd11e87c9327df25dad572758c1d2e04dbf7cc93 100644
--- a/paddle/fluid/operators/controlflow/while_op.cc
+++ b/paddle/fluid/operators/controlflow/while_op.cc
@@ -281,7 +281,7 @@ class WhileGradOp : public framework::OperatorBase {
         auto &inside_tensor = var->Get<framework::LoDTensor>();
         framework::AttributeMap attrs;
         attrs["dtype"] = inside_tensor.type();
-        attrs["shape"] = framework::vectorize2int(inside_tensor.dims());
+        attrs["shape"] = framework::vectorize<int>(inside_tensor.dims());
         attrs["value"] = 0.0f;
 
         auto var_name = pg_ig_names[param_id];
diff --git a/paddle/fluid/operators/conv_fusion_op.cu.cc b/paddle/fluid/operators/conv_fusion_op.cu.cc
index 9b9b3e1d8bd6e3196d34e2b0efb2e1433f3a6016..566daa6608282d89a92418e2c168bdf2c09c65c1 100644
--- a/paddle/fluid/operators/conv_fusion_op.cu.cc
+++ b/paddle/fluid/operators/conv_fusion_op.cu.cc
@@ -79,11 +79,11 @@ class CUDNNConvFusionOpKernel : public framework::OpKernel<T> {
         cudnn_conv_desc, groups));
 
     cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
-        layout, framework::vectorize2int(input->dims()));
+        layout, framework::vectorize<int>(input->dims()));
     cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
-        layout, framework::vectorize2int(output->dims()));
+        layout, framework::vectorize<int>(output->dims()));
     cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
-        layout, framework::vectorize2int(filter->dims()));
+        layout, framework::vectorize<int>(filter->dims()));
     // Now only support NCHW
     std::vector<int> bias_dim = {1, static_cast<int>(output->dims()[1]), 1, 1};
     cudnnTensorDescriptor_t cudnn_bias_desc =
diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc
index 9248f291e6f50e93ebe216055dc7e6ad6498e17f..bab6fe24e42f15e2703a977d1500bc63f343e79c 100644
--- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc
@@ -64,13 +64,13 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
     // (N, M, H, W) or (N, M, D, H, W)
     cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
-        layout, framework::vectorize2int(input->dims()), groups);
+        layout, framework::vectorize<int>(input->dims()), groups);
     // (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w)
     cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
-        layout, framework::vectorize2int(output->dims()), groups);
+        layout, framework::vectorize<int>(output->dims()), groups);
     // (M, C, K_h, K_w) or (M, C, K_d, K_h, K_w)
     cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
-        layout, framework::vectorize2int(filter->dims()), groups);
+        layout, framework::vectorize<int>(filter->dims()), groups);
 
     cudnnConvolutionDescriptor_t cudnn_conv_desc =
         conv_desc.descriptor(paddings, strides, dilations);
@@ -148,13 +148,13 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
     // Input: (N, M, H, W) or (N, M, D, H, W)
     cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
-        layout, framework::vectorize2int(input->dims()), groups);
+        layout, framework::vectorize<int>(input->dims()), groups);
     // Output: (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w)
     cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
-        layout, framework::vectorize2int(output_grad->dims()), groups);
+        layout, framework::vectorize<int>(output_grad->dims()), groups);
     // Filter (M, C, K_h, K_w) or (M, C, K_d, K_h, K_w)
     cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
-        layout, framework::vectorize2int(filter->dims()), groups);
+        layout, framework::vectorize<int>(filter->dims()), groups);
 
     cudnnConvolutionDescriptor_t cudnn_conv_desc =
         conv_desc.descriptor(paddings, strides, dilations);
diff --git a/paddle/fluid/operators/fused/fusion_conv_inception_op.cu b/paddle/fluid/operators/fused/fusion_conv_inception_op.cu
index 76ea6f1b59d6c2c4512f53846886fd81b77ecfbb..63e97ab5d98cdb906bab1aaf759de00d16455729 100644
--- a/paddle/fluid/operators/fused/fusion_conv_inception_op.cu
+++ b/paddle/fluid/operators/fused/fusion_conv_inception_op.cu
@@ -61,7 +61,7 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel<T> {
     T* temp_data = temp_outs[0]->mutable_data<T>(input->dims(), ctx.GetPlace());
 
     DataLayout layout = DataLayout::kNCHW;
-    std::vector<int> in_dim = framework::vectorize2int(input->dims());
+    std::vector<int> in_dim = framework::vectorize<int>(input->dims());
 
     // ------------------- cudnn descriptors ---------------------
     PoolingMode pooling_mode;
@@ -83,9 +83,9 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel<T> {
         pool_desc.descriptor(pooling_mode, k3x3, k1x1, k1x1);
 
     cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
-        layout, framework::vectorize2int(input->dims()));
+        layout, framework::vectorize<int>(input->dims()));
     cudnnTensorDescriptor_t pool_out_desc = out_pool_desc.descriptor<T>(
-        layout, framework::vectorize2int(input->dims()));
+        layout, framework::vectorize<int>(input->dims()));
 
     cudnnDataType_t cudnn_dtype = CudnnDataType<T>::type;
     cudnnTensorDescriptor_t* out_desc = new cudnnTensorDescriptor_t[4];
@@ -126,7 +126,7 @@ class CUDNNConvInceptionFusionOpKernel : public framework::OpKernel<T> {
                                          : CUDNN_DATA_FLOAT;
 
     for (int i = 0; i < 4; ++i) {
-      filter_dims.push_back(framework::vectorize2int(filters[i]->dims()));
+      filter_dims.push_back(framework::vectorize<int>(filters[i]->dims()));
       CUDNN_ENFORCE(platform::dynload::cudnnSetFilterNdDescriptor(
           filter_desc[i], cudnn_dtype, format, 4, filter_dims[i].data()));
       bias_dims.push_back({1, filter_dims[i][0], 1, 1});
diff --git a/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc b/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc
index 7cde7ca462fda9ae6ace7755af0a432afee28bba..c765d344d0be88b56a9d7b5b9ac1d572d2aa5c24 100644
--- a/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc
@@ -55,9 +55,9 @@ class CUDNNGridSampleOpKernel : public framework::OpKernel<T> {
     ScopedTensorDescriptor input_desc;
     ScopedTensorDescriptor output_desc;
     cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
-        DataLayout::kNCHW, framework::vectorize2int(input->dims()));
+        DataLayout::kNCHW, framework::vectorize<int>(input->dims()));
     cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
-        DataLayout::kNCHW, framework::vectorize2int(output->dims()));
+        DataLayout::kNCHW, framework::vectorize<int>(output->dims()));
 
     CUDNN_ENFORCE(platform::dynload::cudnnSpatialTfSamplerForward(
         handle, cudnn_st_desc, CudnnDataType<T>::kOne(), cudnn_input_desc,
@@ -103,13 +103,13 @@ class CUDNNGridSampleGradOpKernel : public framework::OpKernel<T> {
     ScopedTensorDescriptor input_grad_desc;
     ScopedTensorDescriptor output_grad_desc;
     cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
-        DataLayout::kNCHW, framework::vectorize2int(input->dims()));
+        DataLayout::kNCHW, framework::vectorize<int>(input->dims()));
     cudnnTensorDescriptor_t cudnn_input_grad_desc = input_grad_desc.descriptor<T>(
-        DataLayout::kNCHW, framework::vectorize2int(input_grad->dims()));
+        DataLayout::kNCHW, framework::vectorize<int>(input_grad->dims()));
     cudnnTensorDescriptor_t cudnn_output_grad_desc = output_grad_desc.descriptor<T>(
-        DataLayout::kNCHW, framework::vectorize2int(output_grad->dims()));
+        DataLayout::kNCHW, framework::vectorize<int>(output_grad->dims()));
 
     CUDNN_ENFORCE(platform::dynload::cudnnSpatialTfSamplerBackward(
         handle, cudnn_st_dest, CudnnDataType<T>::kOne(), cudnn_input_desc,
diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu
index 71d137398267f61d8cc01907d6a9498eef8d62dc..1c0970c0aa4692bcc51bd69b025b82ecff5bec65 100644
--- a/paddle/fluid/operators/math/softmax.cu
+++ b/paddle/fluid/operators/math/softmax.cu
@@ -35,7 +35,7 @@ void SoftmaxCUDNNFunctor<T>::operator()(
   // ------------------- cudnn descriptors ---------------------
   ScopedTensorDescriptor xDesc;
   ScopedTensorDescriptor yDesc;
-  std::vector<int> cudnn_tensor_dims = framework::vectorize2int(X->dims());
+  std::vector<int> cudnn_tensor_dims = framework::vectorize<int>(X->dims());
   DataLayout layout = DataLayout::kNCHW;
   if (cudnn_tensor_dims.size() == 5) {
     layout = DataLayout::kNCDHW;
@@ -64,7 +64,7 @@ void SoftmaxGradCUDNNFunctor<T>::operator()(
   ScopedTensorDescriptor yDesc;
   ScopedTensorDescriptor dyDesc;
   ScopedTensorDescriptor dxDesc;
-  std::vector<int> cudnn_tensor_dims = framework::vectorize2int(Y->dims());
+  std::vector<int> cudnn_tensor_dims = framework::vectorize<int>(Y->dims());
   DataLayout layout = DataLayout::kNCHW;
   if (cudnn_tensor_dims.size() == 5) {
     layout = DataLayout::kNCDHW;
diff --git a/paddle/fluid/operators/nce_op.h b/paddle/fluid/operators/nce_op.h
index 5665b9f55194a7e2fea5ae55b0829742e1b25582..1f2f778bcd75d083e33ae43ed66f5ba345356003 100644
--- a/paddle/fluid/operators/nce_op.h
+++ b/paddle/fluid/operators/nce_op.h
@@ -186,7 +186,7 @@ class NCEKernel : public framework::OpKernel<T> {
         std::memcpy(x_tensor->data<int64_t>(), labels.data(),
                     labels.size() * sizeof(int64_t));
 
-        std::vector<int> w_dims = paddle::framework::vectorize2int(
+        std::vector<int> w_dims = paddle::framework::vectorize<int>(
             context.Input<Tensor>("Weight")->dims());
         w_dims[0] = static_cast<int>(labels.size());
diff --git a/paddle/fluid/operators/pool_cudnn_op.cu.cc b/paddle/fluid/operators/pool_cudnn_op.cu.cc
index 4a332ce10b59b21d2518684237ce0bbf1bbfa75a..b26f127026804239afd2ebffcb1f93eb3011e238 100644
--- a/paddle/fluid/operators/pool_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/pool_cudnn_op.cu.cc
@@ -65,9 +65,9 @@ class PoolCUDNNOpKernel : public framework::OpKernel<T> {
     }
 
     cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
-        layout, framework::vectorize2int(input->dims()));
+        layout, framework::vectorize<int>(input->dims()));
     cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
-        layout, framework::vectorize2int(output->dims()));
+        layout, framework::vectorize<int>(output->dims()));
 
     PoolingMode pooling_mode;
     if (pooling_type == "max") {
@@ -132,9 +132,9 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel<T> {
     }
 
     cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
-        layout, framework::vectorize2int(input->dims()));
+        layout, framework::vectorize<int>(input->dims()));
     cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
-        layout, framework::vectorize2int(output->dims()));
+        layout, framework::vectorize<int>(output->dims()));
 
     PoolingMode pooling_mode;
     if (pooling_type == "max") {
diff --git a/paddle/fluid/operators/prelu_op.cu b/paddle/fluid/operators/prelu_op.cu
index 998768db0c0bbb41e5f7871c21376ec9680dc8d2..4a26c98af8814a500e35cb2168097a43b16cef44 100644
--- a/paddle/fluid/operators/prelu_op.cu
+++ b/paddle/fluid/operators/prelu_op.cu
@@ -41,7 +41,7 @@ class CUDAPReluKernel : public framework::OpKernel<T> {
     int numel = x->numel();
     auto dim = x->dims();
-    std::vector<int> input_shape = framework::vectorize2int(dim);
+    std::vector<int> input_shape = framework::vectorize<int>(dim);
 
     if (mode == "channel") {
       math::PreluChannelWiseDirectCUDAFunctor<T> prelu_channel_wise;
@@ -157,7 +157,7 @@ class CUDAPReluGradKernel : public framework::OpKernel<T> {
     int numel = x->numel();
     auto dim = x->dims();
-    std::vector<int> input_shape = framework::vectorize2int(dim);
+    std::vector<int> input_shape = framework::vectorize<int>(dim);
     auto stream = context.cuda_device_context().stream();
 
     T* dalpha_tmp_ptr;
diff --git a/paddle/fluid/operators/random_crop_op.cc b/paddle/fluid/operators/random_crop_op.cc
index dad46ec6683349b9d383368a85411a39750e3e2f..65a8d603fcee27223182984769df221e3f519b05 100644
--- a/paddle/fluid/operators/random_crop_op.cc
+++ b/paddle/fluid/operators/random_crop_op.cc
@@ -56,7 +56,7 @@ class RandomCropOpInferShape : public framework::InferShapeBase {
     auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
     auto x_dim = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GT(x_dim.size(), static_cast<int>(shape.size()));
-    auto out_dim = framework::vectorize2int(x_dim);
+    auto out_dim = framework::vectorize<int>(x_dim);
     for (size_t i = 1; i <= shape.size(); ++i) {
       size_t x_i = x_dim.size() - i;
       size_t shape_i = shape.size() - i;
diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc
index 43c724e91b23b8e8dad796d3899c696dd491a3f5..91615a1b43fccaf330b05ffffe06b4bc428737c7 100644
--- a/paddle/fluid/operators/recurrent_op.cc
+++ b/paddle/fluid/operators/recurrent_op.cc
@@ -395,7 +395,7 @@ void RecurrentGradOp::RunImpl(const framework::Scope &scope,
             cur_scope.FindVar(inside_grad_name)->Get<framework::LoDTensor>();
         framework::AttributeMap attrs;
         attrs["dtype"] = inside_tensor.type();
-        attrs["shape"] = framework::vectorize2int(inside_tensor.dims());
+        attrs["shape"] = framework::vectorize<int>(inside_tensor.dims());
         attrs["value"] = 0.0f;
 
         auto zero_op = framework::OpRegistry::CreateOp(
diff --git a/paddle/fluid/operators/reduce_ops/cub_reduce.h b/paddle/fluid/operators/reduce_ops/cub_reduce.h
index afd3922b8d6537ee16dc5041a838858089adbdb1..af56e85e9c6f5e0cfb5e03587fbace4665d9e5fb 100644
--- a/paddle/fluid/operators/reduce_ops/cub_reduce.h
+++ b/paddle/fluid/operators/reduce_ops/cub_reduce.h
@@ -251,7 +251,7 @@ void TensorReduce(const framework::Tensor& x, framework::Tensor* y,
                   std::vector<int> origin_reduce_dims, const Ty& init,
                   const ReduceOp& reducer, const TransformOp& transformer,
                   cudaStream_t stream) {
-  auto x_dim = framework::vectorize2int(x.dims());
+  auto x_dim = framework::vectorize<int>(x.dims());
   std::vector<int> new_x_dim, new_reduce_dims;
   int is_reduced = 0;
   for (auto e : origin_reduce_dims) {
diff --git a/paddle/fluid/operators/rnn_memory_helper_op.cc b/paddle/fluid/operators/rnn_memory_helper_op.cc
index 9f652480a2cc2675d959c03f4e60aec2d602004b..f360ae3cbf4264f655a9d517708a4f9a1c1a15da 100644
--- a/paddle/fluid/operators/rnn_memory_helper_op.cc
+++ b/paddle/fluid/operators/rnn_memory_helper_op.cc
@@ -107,7 +107,7 @@ class RNNMemoryHelperGradOp : public framework::OperatorBase {
 
       framework::AttributeMap attrs;
       attrs["dtype"] = in_var_tensor.type();
-      attrs["shape"] = framework::vectorize2int(in_var_tensor.dims());
+      attrs["shape"] = framework::vectorize<int>(in_var_tensor.dims());
       attrs["value"] = 0.0f;
 
       auto zero_op = framework::OpRegistry::CreateOp(
diff --git a/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc b/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc
index 33d24c11f95758989b21bfd420a7b75685b4ad24..a7225adbf9fcafdff30ecf0f6c7a5f6a73c4f3e8 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc
@@ -27,7 +27,7 @@ class SequenceMaskOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) must exist");
 
     int maxlen = ctx->Attrs().Get<int>("maxlen");
-    auto dim = framework::vectorize2int(ctx->GetInputDim("X"));
+    auto dim = framework::vectorize<int>(ctx->GetInputDim("X"));
 
     if (ctx->HasInputs("MaxLenTensor")) {
       dim.push_back(-1);
diff --git a/paddle/fluid/operators/sequence_ops/sequence_mask_op.h b/paddle/fluid/operators/sequence_ops/sequence_mask_op.h
index a92c5de632065b868a21df856bf6ca34ea13502a..abddc6859fe737ca610c694577c94e530803c931 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_mask_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_mask_op.h
@@ -89,7 +89,7 @@ class SequenceMaskKernel : public framework::OpKernel<Tx> {
      maxlen = *max_len_tensor->data<int>();
     }
 
-    auto y_dim = framework::vectorize2int(x->dims());
+    auto y_dim = framework::vectorize<int>(x->dims());
     y_dim.push_back(maxlen);
     y->Resize(framework::make_ddim(y_dim));
 
@@ -110,7 +110,7 @@
 #else
       maxlen = static_cast<int>(*std::max_element(x_data, x_data + x_numel));
 #endif
-      auto y_dim = framework::vectorize2int(x->dims());
+      auto y_dim = framework::vectorize<int>(x->dims());
       y_dim.push_back(maxlen);
       y->Resize(framework::make_ddim(y_dim));
     }
diff --git a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc
index 5290d0e6c6a2569e389345f61a0844ce3cbde10f..073166703c14b0cf0846fb2ee9519ac10b547a44 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc
@@ -81,7 +81,7 @@ class SequencePadOp : public framework::OperatorWithKernel {
 
     std::vector<int> out_dims_vec{out_dim_0, padded_length};
     std::vector<int> len_dims_vec{out_dim_0, 1};
-    auto time_step_dims_vec = framework::vectorize2int(time_step_dims);
+    auto time_step_dims_vec = framework::vectorize<int>(time_step_dims);
     out_dims_vec.insert(out_dims_vec.end(), time_step_dims_vec.begin(),
                         time_step_dims_vec.end());
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims_vec));
diff --git a/paddle/fluid/operators/slice_op.cu b/paddle/fluid/operators/slice_op.cu
index 24a564f9ef9d6e7bdb80047d69b35a980a141bab..c3fd5190d2cf5ace59e3250b2a1f2ff7aaf5556a 100644
--- a/paddle/fluid/operators/slice_op.cu
+++ b/paddle/fluid/operators/slice_op.cu
@@ -84,9 +84,9 @@ class SliceGradKernel<paddle::platform::CUDADeviceContext, T>
-    auto out_shape = framework::vectorize2int(out_dims);
+    auto out_shape = framework::vectorize<int>(out_dims);
     thrust::device_vector<int> out_dims_vec(out_shape.begin(), out_shape.end());
-    auto in_shape = framework::vectorize2int(in_dims);
+    auto in_shape = framework::vectorize<int>(in_dims);
     thrust::device_vector<int> in_dims_vec(in_shape.begin(), in_shape.end());
     thrust::device_vector<int> offsets_vec(offsets.begin(), offsets.end());
     const int* out_dims_ptr = thrust::raw_pointer_cast(out_dims_vec.data());
diff --git a/paddle/fluid/operators/squeeze_op.cc b/paddle/fluid/operators/squeeze_op.cc
index dc15df2c3c1b8a2964312d983be8ce362d3ab95d..e1795303f72220a7221f56e33af314e46507ead3 100644
--- a/paddle/fluid/operators/squeeze_op.cc
+++ b/paddle/fluid/operators/squeeze_op.cc
@@ -111,7 +111,7 @@ class SqueezeOp : public framework::OperatorBase {
     auto out_dims = SqueezeOpInferShape::GetOutputShape(axes, x_dims, true);
 
     framework::AttributeMap attrs;
-    attrs["shape"] = framework::vectorize2int(out_dims);
+    attrs["shape"] = framework::vectorize<int>(out_dims);
     // Invoke Reshape Op
     auto reshape_op = framework::OpRegistry::CreateOp(
         "reshape", {{"X", {Input("X")}}, {"Shape", {}}},
@@ -177,7 +177,7 @@ class SqueezeGradOp : public framework::OperatorBase {
     auto dout_name = Input(framework::GradVarName("Out"));
     auto x_dims = scope.FindVar(Input("X"))->Get<framework::LoDTensor>().dims();
     framework::AttributeMap attrs;
-    attrs["shape"] = framework::vectorize2int(x_dims);
+    attrs["shape"] = framework::vectorize<int>(x_dims);
 
     auto reshape_op = framework::OpRegistry::CreateOp(
         "reshape", {{"X", {dout_name}}, {"Shape", {}}}, {{"Out", {dx_name}}},
@@ -231,7 +231,7 @@ class Squeeze2Op : public framework::OperatorBase {
     auto out_dims = Squeeze2OpInferShape::GetOutputShape(axes, x_dims, true);
 
     framework::AttributeMap attrs;
-    attrs["shape"] = framework::vectorize2int(out_dims);
+    attrs["shape"] = framework::vectorize<int>(out_dims);
     // Invoke Reshape Op
     auto reshape_op = framework::OpRegistry::CreateOp(
         "reshape2", {{"X", {Input("X")}}, {"Shape", {}}},
@@ -284,7 +284,7 @@ class Squeeze2GradOp : public framework::OperatorBase {
     auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
 
     framework::AttributeMap attrs;
-    attrs["shape"] = framework::vectorize2int(x_dims);
+    attrs["shape"] = framework::vectorize<int>(x_dims);
 
     auto reshape_op = framework::OpRegistry::CreateOp(
         "reshape2", {{"X", {dout_name}}, {"Shape", {}}},
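
Every hunk above is the same mechanical substitution: framework::vectorize2int(dims) becomes framework::vectorize<int>(dims), so the int-returning helper can be retired in favor of a single templated vectorize with an int64_t default. The standalone sketch below illustrates the API shape this relies on; it is an assumption for illustration, not the verbatim Paddle source — the simplified DDim here is a hypothetical stand-in for the fixed-rank class in paddle/fluid/framework/ddim.h.

// Sketch only: simplified stand-in for paddle::framework::DDim.
#include <cstdint>
#include <vector>

struct DDim {
  std::vector<int64_t> d;
  int size() const { return static_cast<int>(d.size()); }
  int64_t operator[](int i) const { return d[i]; }
};

// One template with an int64_t default replaces the old vectorize()/
// vectorize2int() pair; vectorize<int>(dims) is the drop-in spelling for
// vectorize2int(dims).
template <typename T = int64_t>
std::vector<T> vectorize(const DDim& ddim) {
  std::vector<T> result(ddim.size());
  for (int i = 0; i < ddim.size(); ++i) result[i] = static_cast<T>(ddim[i]);
  return result;
}

int main() {
  DDim dims{{2, 3, 4, 5}};
  std::vector<int> cudnn_dims = vectorize<int>(dims);  // e.g. cuDNN descriptors
  std::vector<int64_t> shape = vectorize(dims);        // default stays int64_t
  return cudnn_dims.size() == shape.size() ? 0 : 1;
}

Callers that feed cuDNN descriptor setters or int-typed "shape" attributes ask for the int instantiation explicitly, while the int64_t default leaves room for dimensions that overflow 32 bits.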