From 8368e55be93ea6d2912e3ebebb35e284c5428a28 Mon Sep 17 00:00:00 2001
From: sweetsky0901
Date: Mon, 4 Dec 2017 11:19:16 +0800
Subject: [PATCH] modify some doc

---
 paddle/operators/spp_op.cc                  |  4 +-
 paddle/operators/spp_op.h                   | 47 +++++++++++----------
 python/paddle/v2/fluid/tests/test_spp_op.py | 19 ++++++---
 3 files changed, 38 insertions(+), 32 deletions(-)

diff --git a/paddle/operators/spp_op.cc b/paddle/operators/spp_op.cc
index ff607c576..026b35de1 100644
--- a/paddle/operators/spp_op.cc
+++ b/paddle/operators/spp_op.cc
@@ -29,7 +29,7 @@ class SppOpMaker : public framework::OpProtoAndCheckerMaker {
               "(Tensor) The output tensor of spp operator."
               "N * M."
               "M = C * H * W");
-    AddAttr<int>("pyramid_height", "int");
+    AddAttr<int>("pyramid_height", "(int), multi level pooling");
     AddComment(R"DOC(
         "Does spatial pyramid pooling on the input image by taking the max,
         etc. within regions so that the result vector of different sized
@@ -39,7 +39,7 @@ class SppOpMaker : public framework::OpProtoAndCheckerMaker {
         Where
         $$
           H_{out} = N \\
-          W_{out} = ((std::pow(4, pyramid_height) - 1) / (4 - 1)) * C_{in}
+          W_{out} = (((4^{pyramid\_height}) - 1) / (4 - 1)) * C_{in}
         $$
         )DOC");
   }
diff --git a/paddle/operators/spp_op.h b/paddle/operators/spp_op.h
index 7a385352a..0f2c43ee6 100644
--- a/paddle/operators/spp_op.h
+++ b/paddle/operators/spp_op.h
@@ -34,27 +34,27 @@ class SppKernel : public framework::OpKernel<T> {
     size_t output_offset = 0;
     for (int p = 0; p < pyramid_height; ++p) {
       int bins = std::pow(2, p);
-      int ksize_h = std::ceil(input_h / static_cast<double>(bins));
-      int ksize_w = std::ceil(input_w / static_cast<double>(bins));
-      int padding_h = (ksize_h * bins - input_h + 1) / 2;
-      int padding_w = (ksize_w * bins - input_w + 1) / 2;
-      std::vector<int> ksize({ksize_h, ksize_w});
-      std::vector<int> strides({ksize_h, ksize_w});
+      int kernel_size_h = std::ceil(input_h / static_cast<double>(bins));
+      int kernel_size_w = std::ceil(input_w / static_cast<double>(bins));
+      int padding_h = (kernel_size_h * bins - input_h + 1) / 2;
+      int padding_w = (kernel_size_w * bins - input_w + 1) / 2;
+      std::vector<int> kernel_size({kernel_size_h, kernel_size_w});
+      std::vector<int> strides({kernel_size_h, kernel_size_w});
       std::vector<int> paddings({padding_h, padding_w});
       // pooling output shape
       framework::Tensor out_level;
       std::vector<int64_t> output_shape_vec({in_x->dims()[0], in_x->dims()[1]});
-      output_shape_vec.push_back((input_h - ksize_h + 2 * padding_h) / ksize_h +
-                                 1);
-      output_shape_vec.push_back((input_w - ksize_w + 2 * padding_w) / ksize_w +
-                                 1);
+      output_shape_vec.push_back(
+          (input_h - kernel_size_h + 2 * padding_h) / kernel_size_h + 1);
+      output_shape_vec.push_back(
+          (input_w - kernel_size_w + 2 * padding_w) / kernel_size_w + 1);
       framework::DDim output_shape(framework::make_ddim(output_shape_vec));
       out_level.mutable_data<T>(output_shape, context.GetPlace());
       // pooling
       math::Pool2dFunctor<Place, math::MaxPool<T>, T> pool_forward;
       math::MaxPool<T> max_process;
-      pool_forward(context.device_context(), *in_x, ksize, strides, paddings,
-                   max_process, &out_level);
+      pool_forward(context.device_context(), *in_x, kernel_size, strides,
+                   paddings, max_process, &out_level);
       // flatten pooling output shape
       framework::Tensor out_flatten_level;
       int output_flatten_w = in_x->dims()[1] * bins * bins;
@@ -96,12 +96,12 @@ class SppGradKernel : public framework::OpKernel<T> {
     size_t out_offset = 0;
     for (int p = 0; p < pyramid_height; ++p) {
       int bins = std::pow(2, p);
-      int ksize_h = std::ceil(input_h / static_cast<double>(bins));
-      int ksize_w = std::ceil(input_w / static_cast<double>(bins));
-      int padding_h = (ksize_h * bins - input_h + 1) / 2;
-      int padding_w = (ksize_w * bins - input_w + 1) / 2;
-      std::vector<int> ksize({ksize_h, ksize_w});
-      std::vector<int> strides({ksize_h, ksize_w});
+      int kernel_size_h = std::ceil(input_h / static_cast<double>(bins));
+      int kernel_size_w = std::ceil(input_w / static_cast<double>(bins));
+      int padding_h = (kernel_size_h * bins - input_h + 1) / 2;
+      int padding_w = (kernel_size_w * bins - input_w + 1) / 2;
+      std::vector<int> kernel_size({kernel_size_h, kernel_size_w});
+      std::vector<int> strides({kernel_size_h, kernel_size_w});
       std::vector<int> paddings({padding_h, padding_w});
       // split out and outgrad ... to flatten
       framework::Tensor out_flatten_level;
@@ -129,10 +129,10 @@ class SppGradKernel : public framework::OpKernel<T> {
       framework::Tensor out_level;
       framework::Tensor outgrad_level;
       std::vector<int64_t> out_shape_vec({in_x->dims()[0], in_x->dims()[1]});
-      out_shape_vec.push_back((input_h - ksize_h + 2 * padding_h) / ksize_h +
-                              1);
-      out_shape_vec.push_back((input_w - ksize_w + 2 * padding_w) / ksize_w +
-                              1);
+      out_shape_vec.push_back(
+          (input_h - kernel_size_h + 2 * padding_h) / kernel_size_h + 1);
+      out_shape_vec.push_back(
+          (input_w - kernel_size_w + 2 * padding_w) / kernel_size_w + 1);
       framework::DDim out_shape(framework::make_ddim(out_shape_vec));
       out_level.ShareDataWith(out_flatten_level);
       out_level.Resize(out_shape);
@@ -141,7 +141,8 @@ class SppGradKernel : public framework::OpKernel<T> {
       // pooling backward
       math::MaxPool2dGradFunctor<Place, T> pool2d_backward;
       pool2d_backward(context.device_context(), *in_x, *&out_level,
-                      *&outgrad_level, ksize, strides, paddings, in_x_grad);
+                      *&outgrad_level, kernel_size, strides, paddings,
+                      in_x_grad);
     }
   }
 };
diff --git a/python/paddle/v2/fluid/tests/test_spp_op.py b/python/paddle/v2/fluid/tests/test_spp_op.py
index 89b12e885..b57f4a795 100644
--- a/python/paddle/v2/fluid/tests/test_spp_op.py
+++ b/python/paddle/v2/fluid/tests/test_spp_op.py
@@ -13,14 +13,19 @@ class TestSppOp(OpTest):
         out_level_flatten = []
         for i in xrange(self.pyramid_height):
             bins = np.power(2, i)
-            ksize = [0, 0]
+            kernel_size = [0, 0]
             padding = [0, 0]
-            ksize[0] = np.ceil(hsize / bins.astype("double")).astype("int32")
-            padding[0] = ((ksize[0] * bins - hsize + 1) / 2).astype("int32")
-
-            ksize[1] = np.ceil(wsize / bins.astype("double")).astype("int32")
-            padding[1] = ((ksize[1] * bins - wsize + 1) / 2).astype("int32")
-            out_level = max_pool2D_forward_naive(input, ksize, ksize, padding)
+            kernel_size[0] = np.ceil(hsize /
+                                     bins.astype("double")).astype("int32")
+            padding[0] = (
+                (kernel_size[0] * bins - hsize + 1) / 2).astype("int32")
+
+            kernel_size[1] = np.ceil(wsize /
+                                     bins.astype("double")).astype("int32")
+            padding[1] = (
+                (kernel_size[1] * bins - wsize + 1) / 2).astype("int32")
+            out_level = max_pool2D_forward_naive(input, kernel_size,
+                                                 kernel_size, padding)
             out_level_flatten.append(
                 out_level.reshape(nsize, bins * bins * csize))
             if i == 0:
--
GitLab
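A quick sanity check of the arithmetic this patch touches (not part of the patch itself; the helper names below are hypothetical). The sketch mirrors the per-level kernel_size/padding computation from spp_op.h and the flattened output width given by the W_{out} formula in the op doc:

    import numpy as np

    def spp_output_width(pyramid_height, channels):
        # Level p pools into 2^p x 2^p = 4^p bins per channel, so the
        # flattened widths sum to C * (4^0 + ... + 4^(h-1))
        #                       = C * (4^h - 1) / (4 - 1).
        return (4 ** pyramid_height - 1) // 3 * channels

    def level_params(input_h, input_w, p):
        # Per-level kernel size and padding, as computed in SppKernel.
        bins = 2 ** p
        kernel_size_h = int(np.ceil(input_h / float(bins)))
        kernel_size_w = int(np.ceil(input_w / float(bins)))
        padding_h = (kernel_size_h * bins - input_h + 1) // 2
        padding_w = (kernel_size_w * bins - input_w + 1) // 2
        return (kernel_size_h, kernel_size_w), (padding_h, padding_w)

    # A 3-level pyramid over a 7x9 feature map with 16 channels gives
    # 1 + 4 + 16 = 21 bins per channel, i.e. output width 21 * 16 = 336.
    print(spp_output_width(3, 16))  # 336
    for p in range(3):
        print(level_params(7, 9, p))

Because each level's kernel size is rounded up and the input is padded to kernel_size * bins, every level pools the feature map into exactly bins x bins outputs regardless of the input size, which is what makes the concatenated vector length depend only on pyramid_height and C.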