// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/operators/pool_op.h"
#include <algorithm>
#include "lite/core/op_registry.h"

namespace paddle {
namespace lite {
namespace operators {

// Validate input/output presence and the consistency of the pooling
// attributes (kernel, strides, paddings) against the input rank.
bool PoolOpLite::CheckShape() const {
  CHECK_OR_FALSE(param_.x);
  CHECK_OR_FALSE(param_.output);

  const auto& in_dims = param_.x->dims();
  const auto& kernel = param_.ksize;
  const auto& stride_vec = param_.strides;
  const auto& pad_vec = *param_.paddings;

  // Pooling input should be a 4-D or 5-D tensor.
  CHECK_OR_FALSE(in_dims.size() == 4 || in_dims.size() == 5);
  // Kernel covers every dimension except batch (N) and channel (C).
  CHECK_OR_FALSE(in_dims.size() - kernel.size() == 2U);
  // One stride per pooled spatial dimension.
  CHECK_OR_FALSE(kernel.size() == stride_vec.size());
  // Paddings hold a (begin, end) pair per spatial dimension; the op
  // expects exactly 4 entries (i.e. the 2-D pooling layout).
  CHECK_OR_FALSE(pad_vec.size() == 4L);

  return true;
}

// Compute the output extent of one pooled spatial dimension.
//
//   floor mode: out = (in - k + pad_l + pad_r) / s + 1
//   ceil mode:  same, but the division rounds up so a partial final
//               window still produces an output element.
int PoolOutputSize(int input_size,
                   int filter_size,
                   int pad_left,
                   int pad_right,
                   int stride,
                   bool ceil_mode) {
  const int span = input_size - filter_size + pad_left + pad_right;
  // Adding (stride - 1) before the truncating division turns it into a
  // ceiling division.
  const int numerator = ceil_mode ? span + stride - 1 : span;
  return numerator / stride + 1;
}

bool PoolOpLite::InferShape() const {
  const auto x_dims = param_.x->dims();
  std::vector<int>& ksize = param_.ksize;
66 67 68 69 70 71 72 73
  // dynamic update 4-pad
  UpdatePadding(param_.paddings.get(),
                param_.global_pooling,
                param_.adaptive,
                padding_algorithm_,
                x_dims,
                param_.strides,
                ksize);
Y
Yan Chunwei 已提交
74 75 76 77 78 79
  if (param_.global_pooling) {
    ksize.resize(static_cast<size_t>(x_dims.size()) - 2);
    for (size_t i = 0; i < ksize.size(); ++i) {
      ksize[i] = static_cast<int>(x_dims[i + 2]);
    }
  }
80
  auto paddings = *param_.paddings;
Y
Yan Chunwei 已提交
81 82 83 84 85 86 87 88
  std::vector<int64_t> output_shape({x_dims[0], x_dims[1]});
  if (param_.adaptive) {
    output_shape.insert(
        output_shape.end(), param_.ksize.begin(), param_.ksize.end());
  } else {
    for (size_t i = 0; i < param_.ksize.size(); ++i) {
      output_shape.push_back(PoolOutputSize(x_dims[i + 2],
                                            param_.ksize[i],
89 90
                                            paddings[2 * i],
                                            paddings[2 * i + 1],
Y
Yan Chunwei 已提交
91 92 93 94 95 96 97 98 99 100 101 102 103 104
                                            param_.strides[i],
                                            param_.ceil_mode));
    }
  }
  param_.output->Resize(lite::DDim(output_shape));

  return true;
}

}  // namespace operators
}  // namespace lite
}  // namespace paddle

REGISTER_LITE_OP(pool2d, paddle::lite::operators::PoolOpLite);