From a93227a1483d32b221a2d85385c8871d20b9ad09 Mon Sep 17 00:00:00 2001
From: chengduoZH
Date: Wed, 22 Nov 2017 11:19:56 +0800
Subject: [PATCH] refine code

Replace the repeated filter_shape_vec.size() == 4 / == 5 checks in the conv
and conv_transpose GEMM kernels with a single data_dim variable (the number
of spatial dimensions), and build col_shape_vec with one allocation and an
indexed loop instead of assign() followed by two insert() calls.

---
 paddle/operators/conv_op.h           | 44 ++++++++++++++--------------
 paddle/operators/conv_transpose_op.h | 42 +++++++++++++-------------
 2 files changed, 42 insertions(+), 44 deletions(-)

diff --git a/paddle/operators/conv_op.h b/paddle/operators/conv_op.h
index 152d6b5132e..09bff0a68db 100644
--- a/paddle/operators/conv_op.h
+++ b/paddle/operators/conv_op.h
@@ -99,20 +99,20 @@ class GemmConvKernel : public framework::OpKernel<T> {
     // use col_shape in the im2col calculation
     // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
     // o_h, o_w}
-    std::vector<int64_t> col_shape_vec(filter_shape_vec.size() +
-                                       output_shape_vec.size() - 3);
-    col_shape_vec.assign(1, input->dims()[1] / groups);
-    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin() + 2,
-                         filter_shape_vec.end());
-    col_shape_vec.insert(col_shape_vec.end(), output_shape_vec.begin() + 2,
-                         output_shape_vec.end());
+    size_t data_dim = filter_shape_vec.size() - 2;
+    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
+    col_shape_vec[0] = input->dims()[1] / groups;
+    for (size_t j = 0; j < data_dim; ++j) {
+      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
+      col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
+    }
     framework::DDim col_shape(framework::make_ddim(col_shape_vec));
 
     // use col_matrix_shape in the gemm calculation
     // size: (i_c/g * k_h * k_w, o_h * o_w) or (i_c/g * k_d * k_h * k_w, o_d *
     // o_h * o_w)
     framework::DDim col_matrix_shape =
-        framework::flatten_to_2d(col_shape, filter_shape_vec.size() - 2 + 1);
+        framework::flatten_to_2d(col_shape, data_dim + 1);
 
     bool is_expand = IsExpand(filter_shape_vec, strides, paddings, dilations);
     Tensor col;
@@ -155,13 +155,13 @@ class GemmConvKernel : public framework::OpKernel<T> {
           col.ShareDataWith(in_slice);
           col_matrix.ShareDataWith(col);
           col_matrix.Resize(col_matrix_shape);
-        } else if (filter_shape_vec.size() == 4) {
+        } else if (data_dim == 2U) {
          // im2col
           im2col(context.device_context(), in_slice, dilations, strides,
                  std::vector<int>{paddings[0], paddings[1], paddings[0],
                                   paddings[1]},
                  &col);
-        } else if (filter_shape_vec.size() == 5) {
+        } else if (data_dim == 3U) {
          // vol2col
           vol2col(context.device_context(), in_slice, dilations, strides,
                   paddings, &col);
@@ -211,13 +211,13 @@ class GemmConvGradKernel : public framework::OpKernel<T> {
     // use col_shape in the im2col calculation
     // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d,
     // o_h, o_w}
-    std::vector<int64_t> col_shape_vec(filter_shape_vec.size() +
-                                       output_shape_vec.size() - 3);
-    col_shape_vec.assign(1, input->dims()[1] / groups);
-    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin() + 2,
-                         filter_shape_vec.end());
-    col_shape_vec.insert(col_shape_vec.end(), output_shape_vec.begin() + 2,
-                         output_shape_vec.end());
+    size_t data_dim = filter_shape_vec.size() - 2;
+    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
+    col_shape_vec[0] = input->dims()[1] / groups;
+    for (size_t j = 0; j < data_dim; ++j) {
+      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
+      col_shape_vec[j + 1 + data_dim] = output_shape_vec[j + 2];
+    }
     framework::DDim col_shape(framework::make_ddim(col_shape_vec));
 
     // use col_matrix_shape in the gemm calculation
@@ -225,7 +225,7 @@ class GemmConvGradKernel : public framework::OpKernel<T> {
     // size: (i_c/g * k_h * k_w, o_h * o_w)
     // or
     // (i_c/g * k_d * k_h * k_w, o_d * o_h * o_w)
     framework::DDim col_matrix_shape =
-        framework::flatten_to_2d(col_shape, filter_shape_vec.size() - 2 + 1);
+        framework::flatten_to_2d(col_shape, data_dim + 1);
 
     framework::DDim input_shape = framework::slice_ddim(
         input->dims(), 1, static_cast<int>(input->dims().size()));
@@ -286,12 +286,12 @@ class GemmConvGradKernel : public framework::OpKernel<T> {
                                  out_grad_slice, false, T(1.0), &col_matrix,
                                  T(0.0));
 
-        if (is_expand && filter_shape_vec.size() == 4) {
+        if (is_expand && data_dim == 2U) {
           col2im(context.device_context(), col, dilations, strides,
                  std::vector<int>{paddings[0], paddings[1], paddings[0],
                                   paddings[1]},
                  &in_grad_slice);
-        } else if (is_expand && filter_shape_vec.size() == 5) {
+        } else if (is_expand && data_dim == 3U) {
           col2vol(context.device_context(), col, dilations, strides, paddings,
                   &in_grad_slice);
         }
@@ -320,12 +320,12 @@ class GemmConvGradKernel : public framework::OpKernel<T> {
           col.ShareDataWith(in_slice);
           col_matrix.ShareDataWith(col);
           col_matrix.Resize(col_matrix_shape);
-        } else if (filter_shape_vec.size() == 4) {
+        } else if (data_dim == 2U) {
           im2col(context.device_context(), in_slice, dilations, strides,
                  std::vector<int>{paddings[0], paddings[1], paddings[0],
                                   paddings[1]},
                  &col);
-        } else if (filter_shape_vec.size() == 5) {
+        } else if (data_dim == 3U) {
           vol2col(context.device_context(), in_slice, dilations, strides,
                   paddings, &col);
         }
diff --git a/paddle/operators/conv_transpose_op.h b/paddle/operators/conv_transpose_op.h
index e9c953699e7..0fc0735788c 100644
--- a/paddle/operators/conv_transpose_op.h
+++ b/paddle/operators/conv_transpose_op.h
@@ -76,19 +76,18 @@ class GemmConvTransposeKernel : public framework::OpKernel<T> {
     // use col_shape in the im2col and col2im (or vol2col and col2vol)
     // calculation
     // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w}
-    std::vector<int64_t> col_shape_vec(filter_shape_vec.size() +
-                                       input_shape_vec.size() - 3);
-    col_shape_vec.assign(1, output->dims()[1]);
-    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin() + 2,
-                         filter_shape_vec.end());
-    col_shape_vec.insert(col_shape_vec.end(), input_shape_vec.begin() + 2,
-                         input_shape_vec.end());
+    size_t data_dim = filter_shape_vec.size() - 2;
+    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
+    col_shape_vec[0] = output->dims()[1];
+    for (size_t j = 0; j < data_dim; ++j) {
+      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
+      col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2];
+    }
     DDim col_shape(framework::make_ddim(col_shape_vec));
 
     // use col_matrix_shape in the gemm calculation
     // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
-    DDim col_matrix_shape =
-        framework::flatten_to_2d(col_shape, filter_shape_vec.size() - 2 + 1);
+    DDim col_matrix_shape = framework::flatten_to_2d(col_shape, data_dim + 1);
 
     Tensor col;
     col.mutable_data<T>(col_shape, context.GetPlace());
@@ -133,7 +132,7 @@ class GemmConvTransposeKernel : public framework::OpKernel<T> {
                              input_batch, false, static_cast<T>(1.0),
                              &col_matrix, static_cast<T>(0.0));
 
-      if (filter_shape_vec.size() == 4) {
+      if (data_dim == 2U) {
         // col2im: col_matrix -> dy
         // from (c * k_h * k_w, h * w) to (c, o_h, o_w)
         col2im(context.device_context(), col,
@@ -141,7 +140,7 @@ class GemmConvTransposeKernel : public framework::OpKernel<T> {
                dilations, strides,
                std::vector<int>{paddings[0], paddings[1], paddings[0],
                                 paddings[1]},
                &output_batch);
-      } else if (filter_shape_vec.size() == 5) {
+      } else if (data_dim == 3U) {
         // col2vol: col_matrix -> dy
         // from (c * k_d * k_h * k_w, d * h * w) to (c, o_d, o_h, o_w)
         col2vol(context.device_context(), col, dilations, strides, paddings,
@@ -181,19 +180,18 @@ class GemmConvTransposeGradKernel : public framework::OpKernel<T> {
     // use col_shape in the im2col and col2im (or vol2col and col2vol)
     // calculation
     // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w}
-    std::vector<int64_t> col_shape_vec(filter_shape_vec.size() +
-                                       input_shape_vec.size() - 3);
-    col_shape_vec.assign(1, output_grad->dims()[1]);
-    col_shape_vec.insert(col_shape_vec.end(), filter_shape_vec.begin() + 2,
-                         filter_shape_vec.end());
-    col_shape_vec.insert(col_shape_vec.end(), input_shape_vec.begin() + 2,
-                         input_shape_vec.end());
+    size_t data_dim = filter_shape_vec.size() - 2;
+    std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
+    col_shape_vec[0] = output_grad->dims()[1];
+    for (size_t j = 0; j < data_dim; ++j) {
+      col_shape_vec[j + 1] = filter_shape_vec[j + 2];
+      col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2];
+    }
     DDim col_shape(framework::make_ddim(col_shape_vec));
 
     // use col_matrix_shape in the gemm calculation
     // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w)
-    DDim col_matrix_shape =
-        framework::flatten_to_2d(col_shape, filter_shape_vec.size() - 2 + 1);
+    DDim col_matrix_shape = framework::flatten_to_2d(col_shape, data_dim + 1);
 
     // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w)
     DDim output_shape = framework::slice_ddim(output_grad->dims(), 1,
@@ -242,7 +240,7 @@ class GemmConvTransposeGradKernel : public framework::OpKernel<T> {
       Tensor output_grad_batch =
           output_grad->Slice(i, i + 1).Resize(output_shape);
 
-      if (filter_shape_vec.size() == 4) {
+      if (data_dim == 2U) {
         // im2col: dy -> col matrix
         // from (c, o_h, o_w) to (c * k_h * k_w, h * w)
         im2col(context.device_context(), output_grad_batch,
@@ -250,7 +248,7 @@ class GemmConvTransposeGradKernel : public framework::OpKernel<T> {
                dilations, strides,
                std::vector<int>{paddings[0], paddings[1], paddings[0],
                                 paddings[1]},
                &col);
-      } else if (filter_shape_vec.size() == 5) {
+      } else if (data_dim == 3U) {
         // vol2col: dy -> col_matrix
        // from (c, o_d, o_h, o_w) to (c * k_d * k_h * k_w, d * h * w)
         vol2col(context.device_context(), output_grad_batch, dilations,
--
GitLab
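
Aside: the core of this change is the col_shape_vec construction. The
standalone C++ sketch below (not part of the patch; the shape values and
variable names are made up for illustration) checks that the new
indexed-loop construction produces exactly the same vector as the old
assign/insert sequence for a conv2d-style filter.

// Standalone sketch (not part of the patch; shapes are illustrative only).
// Verifies that the indexed-loop construction of col_shape_vec matches the
// old assign()/insert() construction for a 2-D convolution.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // NCHW-style shapes: filter {o_c, i_c/g, k_h, k_w}, output {n, o_c, o_h, o_w}.
  std::vector<int64_t> filter_shape_vec{64, 3, 3, 3};
  std::vector<int64_t> output_shape_vec{1, 64, 32, 32};
  const int64_t channels = 3;  // stands in for input->dims()[1] / groups

  // Old construction: the initial sizing is discarded by assign(), and the
  // two insert() calls may each reallocate.
  std::vector<int64_t> old_shape(filter_shape_vec.size() +
                                 output_shape_vec.size() - 3);
  old_shape.assign(1, channels);
  old_shape.insert(old_shape.end(), filter_shape_vec.begin() + 2,
                   filter_shape_vec.end());
  old_shape.insert(old_shape.end(), output_shape_vec.begin() + 2,
                   output_shape_vec.end());

  // New construction: data_dim is the number of spatial dimensions
  // (2 for conv2d, 3 for conv3d); one allocation, each slot written once.
  size_t data_dim = filter_shape_vec.size() - 2;
  std::vector<int64_t> new_shape(1 + 2 * data_dim);
  new_shape[0] = channels;
  for (size_t j = 0; j < data_dim; ++j) {
    new_shape[j + 1] = filter_shape_vec[j + 2];
    new_shape[j + 1 + data_dim] = output_shape_vec[j + 2];
  }

  assert(old_shape == new_shape);  // both are {3, 3, 3, 32, 32}
  for (int64_t d : new_shape) std::cout << d << ' ';
  std::cout << '\n';
  return 0;
}

The same data_dim value then replaces the filter_shape_vec.size() == 4 /
== 5 checks, which is why the dispatch branches in the patch read
data_dim == 2U (im2col/col2im) and data_dim == 3U (vol2col/col2vol).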