Commit 8368e55b authored by sweetsky0901

Modify some docs

Parent 531e7b6f
@@ -29,7 +29,7 @@ class SppOpMaker : public framework::OpProtoAndCheckerMaker {
               "(Tensor) The output tensor of spp operator."
               "N * M."
               "M = C * H * W");
-    AddAttr<int>("pyramid_height", "int");
+    AddAttr<int>("pyramid_height", "(int), multi level pooling");
     AddComment(R"DOC(
         "Does spatial pyramid pooling on the input image by taking the max,
         etc. within regions so that the result vector of different sized
@@ -39,7 +39,7 @@ class SppOpMaker : public framework::OpProtoAndCheckerMaker {
         Where
         $$
         H_{out} = N \\
-        W_{out} = ((std::pow(4, pyramid_height) - 1) / (4 - 1)) * C_{in}
+        W_{out} = ((4^{pyramid\_height} - 1) / (4 - 1)) * C_{in}
         $$
         )DOC");
   }
...
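For intuition: level p pools the feature map into 2^p x 2^p = 4^p bins, so summing over pyramid_height levels gives (4^{pyramid_height} - 1) / (4 - 1) bins per channel, which is exactly the W_{out} above. A minimal sketch that checks the arithmetic (the function name is illustrative, not part of the patch):

    # Sanity check of the flattened SPP width per the DOC formula above.
    # Level p contributes 2^p x 2^p = 4^p bins per channel.
    def spp_output_width(pyramid_height, channels):
        bins_total = sum(4 ** p for p in range(pyramid_height))
        # Geometric series: sum of 4^p for p in [0, H) is (4^H - 1) / 3.
        assert bins_total == (4 ** pyramid_height - 1) // (4 - 1)
        return bins_total * channels

    # e.g. pyramid_height=3, C_in=64 -> (1 + 4 + 16) * 64 = 1344
    print(spp_output_width(3, 64))  # 1344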
@@ -34,27 +34,27 @@ class SppKernel : public framework::OpKernel<T> {
     size_t output_offset = 0;
     for (int p = 0; p < pyramid_height; ++p) {
       int bins = std::pow(2, p);
-      int ksize_h = std::ceil(input_h / static_cast<double>(bins));
-      int ksize_w = std::ceil(input_w / static_cast<double>(bins));
-      int padding_h = (ksize_h * bins - input_h + 1) / 2;
-      int padding_w = (ksize_w * bins - input_w + 1) / 2;
-      std::vector<int> ksize({ksize_h, ksize_w});
-      std::vector<int> strides({ksize_h, ksize_w});
+      int kernel_size_h = std::ceil(input_h / static_cast<double>(bins));
+      int kernel_size_w = std::ceil(input_w / static_cast<double>(bins));
+      int padding_h = (kernel_size_h * bins - input_h + 1) / 2;
+      int padding_w = (kernel_size_w * bins - input_w + 1) / 2;
+      std::vector<int> kernel_size({kernel_size_h, kernel_size_w});
+      std::vector<int> strides({kernel_size_h, kernel_size_w});
       std::vector<int> paddings({padding_h, padding_w});
       // pooling output shape
       framework::Tensor out_level;
       std::vector<int64_t> output_shape_vec({in_x->dims()[0], in_x->dims()[1]});
-      output_shape_vec.push_back((input_h - ksize_h + 2 * padding_h) / ksize_h +
-                                 1);
-      output_shape_vec.push_back((input_w - ksize_w + 2 * padding_w) / ksize_w +
-                                 1);
+      output_shape_vec.push_back(
+          (input_h - kernel_size_h + 2 * padding_h) / kernel_size_h + 1);
+      output_shape_vec.push_back(
+          (input_w - kernel_size_w + 2 * padding_w) / kernel_size_w + 1);
       framework::DDim output_shape(framework::make_ddim(output_shape_vec));
       out_level.mutable_data<T>(output_shape, context.GetPlace());
       // pooling
       math::Pool2dFunctor<Place, math::MaxPool<T>, T> pool_forward;
       math::MaxPool<T> max_process;
-      pool_forward(context.device_context(), *in_x, ksize, strides, paddings,
-                   max_process, &out_level);
+      pool_forward(context.device_context(), *in_x, kernel_size, strides,
+                   paddings, max_process, &out_level);
       // flatten pooling output shape
       framework::Tensor out_flatten_level;
       int output_flatten_w = in_x->dims()[1] * bins * bins;
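Each level picks kernel_size = ceil(input / bins) and just enough padding that the usual pooling shape formula lands on exactly bins x bins output cells. A quick check of that invariant, assuming input_size >= bins (illustrative helper, not the Paddle API):

    import math

    # With kernel = ceil(input / bins) and padding = (kernel * bins - input + 1) // 2,
    # the standard formula (in - k + 2 * pad) // stride + 1 gives exactly bins cells.
    def level_output_size(input_size, bins):
        kernel = int(math.ceil(input_size / float(bins)))
        padding = (kernel * bins - input_size + 1) // 2
        return (input_size - kernel + 2 * padding) // kernel + 1

    for input_size in (8, 13, 32):
        for p in range(4):
            bins = 2 ** p
            assert level_output_size(input_size, bins) == bins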
@@ -96,12 +96,12 @@ class SppGradKernel : public framework::OpKernel<T> {
     size_t out_offset = 0;
     for (int p = 0; p < pyramid_height; ++p) {
       int bins = std::pow(2, p);
-      int ksize_h = std::ceil(input_h / static_cast<double>(bins));
-      int ksize_w = std::ceil(input_w / static_cast<double>(bins));
-      int padding_h = (ksize_h * bins - input_h + 1) / 2;
-      int padding_w = (ksize_w * bins - input_w + 1) / 2;
-      std::vector<int> ksize({ksize_h, ksize_w});
-      std::vector<int> strides({ksize_h, ksize_w});
+      int kernel_size_h = std::ceil(input_h / static_cast<double>(bins));
+      int kernel_size_w = std::ceil(input_w / static_cast<double>(bins));
+      int padding_h = (kernel_size_h * bins - input_h + 1) / 2;
+      int padding_w = (kernel_size_w * bins - input_w + 1) / 2;
+      std::vector<int> kernel_size({kernel_size_h, kernel_size_w});
+      std::vector<int> strides({kernel_size_h, kernel_size_w});
       std::vector<int> paddings({padding_h, padding_w});
       // split out and outgrad ... to flatten
       framework::Tensor out_flatten_level;
@@ -129,10 +129,10 @@ class SppGradKernel : public framework::OpKernel<T> {
       framework::Tensor out_level;
       framework::Tensor outgrad_level;
       std::vector<int64_t> out_shape_vec({in_x->dims()[0], in_x->dims()[1]});
-      out_shape_vec.push_back((input_h - ksize_h + 2 * padding_h) / ksize_h +
-                              1);
-      out_shape_vec.push_back((input_w - ksize_w + 2 * padding_w) / ksize_w +
-                              1);
+      out_shape_vec.push_back(
+          (input_h - kernel_size_h + 2 * padding_h) / kernel_size_h + 1);
+      out_shape_vec.push_back(
+          (input_w - kernel_size_w + 2 * padding_w) / kernel_size_w + 1);
       framework::DDim out_shape(framework::make_ddim(out_shape_vec));
       out_level.ShareDataWith(out_flatten_level);
       out_level.Resize(out_shape);
@@ -141,7 +141,8 @@ class SppGradKernel : public framework::OpKernel<T> {
       // pooling backward
       math::MaxPool2dGradFunctor<Place, T> pool2d_backward;
       pool2d_backward(context.device_context(), *in_x, *&out_level,
-                      *&outgrad_level, ksize, strides, paddings, in_x_grad);
+                      *&outgrad_level, kernel_size, strides, paddings,
+                      in_x_grad);
     }
   }
 };
...
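The backward pass mirrors the forward loop: out_offset advances by C * bins * bins per level, carving each level's slice out of the flattened N x M output gradient before it is reshaped back to N x C x bins x bins for MaxPool2dGradFunctor. A small numpy illustration of that slicing (shapes are hypothetical, not the Paddle API):

    import numpy as np

    # Split a flattened SPP output of shape (N, M) back into per-level
    # blocks of width C * bins * bins, mirroring out_offset above.
    N, C, pyramid_height = 2, 3, 3
    widths = [C * (2 ** p) ** 2 for p in range(pyramid_height)]  # [3, 12, 48]
    flat = np.arange(N * sum(widths), dtype=np.float64).reshape(N, sum(widths))

    offset = 0
    for p, w in enumerate(widths):
        bins = 2 ** p
        level = flat[:, offset:offset + w].reshape(N, C, bins, bins)
        offset += w
        print(p, level.shape)  # (N, C, bins, bins)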
@@ -13,14 +13,19 @@ class TestSppOp(OpTest):
         out_level_flatten = []
         for i in xrange(self.pyramid_height):
             bins = np.power(2, i)
-            ksize = [0, 0]
+            kernel_size = [0, 0]
             padding = [0, 0]
-            ksize[0] = np.ceil(hsize / bins.astype("double")).astype("int32")
-            padding[0] = ((ksize[0] * bins - hsize + 1) / 2).astype("int32")
-
-            ksize[1] = np.ceil(wsize / bins.astype("double")).astype("int32")
-            padding[1] = ((ksize[1] * bins - wsize + 1) / 2).astype("int32")
-            out_level = max_pool2D_forward_naive(input, ksize, ksize, padding)
+            kernel_size[0] = np.ceil(hsize /
+                                     bins.astype("double")).astype("int32")
+            padding[0] = (
+                (kernel_size[0] * bins - hsize + 1) / 2).astype("int32")
+
+            kernel_size[1] = np.ceil(wsize /
+                                     bins.astype("double")).astype("int32")
+            padding[1] = (
+                (kernel_size[1] * bins - wsize + 1) / 2).astype("int32")
+            out_level = max_pool2D_forward_naive(input, kernel_size,
+                                                 kernel_size, padding)
             out_level_flatten.append(
                 out_level.reshape(nsize, bins * bins * csize))
             if i == 0:
...
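Taken together, the test loop is equivalent to the following self-contained numpy reference. max_pool2D_forward_naive is the test's own helper; here the pooling is re-implemented inline under the same kernel/stride/padding convention, so treat this as a sketch rather than the canonical op:

    import numpy as np

    def spp_forward_reference(x, pyramid_height):
        """Max-based spatial pyramid pooling over an NCHW array."""
        n, c, h, w = x.shape
        levels = []
        for p in range(pyramid_height):
            bins = 2 ** p
            kh = int(np.ceil(h / float(bins)))
            kw = int(np.ceil(w / float(bins)))
            ph = (kh * bins - h + 1) // 2
            pw = (kw * bins - w + 1) // 2
            # Pad with -inf so the padding never wins the max.
            padded = np.full((n, c, h + 2 * ph, w + 2 * pw), -np.inf)
            padded[:, :, ph:ph + h, pw:pw + w] = x
            level = np.empty((n, c, bins, bins))
            for i in range(bins):
                for j in range(bins):
                    patch = padded[:, :, i * kh:(i + 1) * kh,
                                   j * kw:(j + 1) * kw]
                    level[:, :, i, j] = patch.max(axis=(2, 3))
            levels.append(level.reshape(n, c * bins * bins))
        # Flattened width is C * (4^pyramid_height - 1) / 3, matching the DOC.
        return np.concatenate(levels, axis=1)

    x = np.random.rand(2, 3, 7, 7)
    out = spp_forward_reference(x, 3)
    assert out.shape == (2, 3 * (1 + 4 + 16))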