// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "lite/core/kernel.h"
#include "lite/core/op_lite.h"
#include "lite/core/scope.h"
#include "lite/core/tensor.h"
#include "lite/operators/op_params.h"
#include "lite/utils/all.h"

namespace paddle {
namespace lite {
namespace operators {

class PoolOpLite : public OpLite {
 public:
  PoolOpLite() {}

  explicit PoolOpLite(const std::string &type) : OpLite(type) {}

  bool CheckShape() const override;

  bool InferShapeImpl() const override;

  // TODO(Superjomn) replace framework::OpDesc with a lite one.
  bool AttachImpl(const cpp::OpDesc &op_desc, lite::Scope *scope) override {
    auto x = op_desc.Input("X").front();
    auto out = op_desc.Output("Out").front();

    CHECK(scope->FindVar(x));
    CHECK(scope->FindVar(out));
    param_.x = scope->FindVar(x)->GetMutable<lite::Tensor>();
    param_.output = scope->FindVar(out)->GetMutable<lite::Tensor>();

    param_.pooling_type = op_desc.GetAttr<std::string>("pooling_type");
    param_.ksize = op_desc.GetAttr<std::vector<int>>("ksize");
    param_.global_pooling = op_desc.GetAttr<bool>("global_pooling");
    param_.strides = op_desc.GetAttr<std::vector<int>>("strides");
    auto paddings = op_desc.GetAttr<std::vector<int>>("paddings");

    if (op_desc.HasAttr("exclusive")) {
      param_.exclusive = op_desc.GetAttr<bool>("exclusive");
    }
    if (op_desc.HasAttr("adaptive")) {
      param_.adaptive = op_desc.GetAttr<bool>("adaptive");
    }
    if (op_desc.HasAttr("ceil_mode")) {
      param_.ceil_mode = op_desc.GetAttr<bool>("ceil_mode");
    }
    if (op_desc.HasAttr("use_quantizer")) {
      param_.use_quantizer = op_desc.GetAttr<bool>("use_quantizer");
    }
    if (op_desc.HasAttr("padding_algorithm")) {
      padding_algorithm_ = op_desc.GetAttr<std::string>("padding_algorithm");
    }
    // Expand a 2-length paddings vector ({pad_h, pad_w}) to the 4-length
    // form ({pad_top, pad_bottom, pad_left, pad_right}) used internally,
    // e.g. {1, 2} -> {1, 1, 2, 2}.
    if (paddings.size() == 2L) {
      for (size_t i = 0; i < 2L; ++i) {
        int copy_pad = *(paddings.begin() + 2 * i);
        paddings.insert(paddings.begin() + 2 * i + 1, copy_pad);
      }
    } else if (paddings.size() != 4L) {
      LOG(FATAL) << "Paddings size should be 2 or 4, but received "
                 << paddings.size();
    }
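    // NOTE: paddings is held behind a shared_ptr, so later in-place updates
    // (e.g. by UpdatePadding below) are visible to every holder of param_.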
    param_.paddings = std::make_shared<std::vector<int>>(paddings);

    return true;
  }

  void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }

  std::string DebugString() const override { return "pool2d"; }

 private:
  mutable PoolParam param_;
  std::string padding_algorithm_{""};
};
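// A minimal usage sketch (illustrative only; it assumes the cpp::OpDesc
// setters and the variable names shown here, which are not part of this
// file):
//
//   cpp::OpDesc desc;
//   desc.SetType("pool2d");
//   desc.SetInput("X", {"x"});
//   desc.SetOutput("Out", {"out"});
//   desc.SetAttr<std::string>("pooling_type", "max");
//   desc.SetAttr<std::vector<int>>("ksize", {2, 2});
//   desc.SetAttr<bool>("global_pooling", false);
//   desc.SetAttr<std::vector<int>>("strides", {2, 2});
//   desc.SetAttr<std::vector<int>>("paddings", {0, 0});
//
//   PoolOpLite op("pool2d");
//   op.Attach(desc, &scope);  // drives AttachImpl above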

inline void UpdatePadding(std::vector<int> *paddings,
                          const bool global_pooling,
                          const bool adaptive,
                          const std::string &padding_algorithm,
                          const lite::DDim &data_dims,
                          const std::vector<int> &strides,
                          const std::vector<int> &ksize) {
  // Adjust paddings according to padding_algorithm ("SAME" or "VALID").
  if (padding_algorithm == "SAME") {
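    // SAME: choose paddings so that out_size = ceil(in_size / stride); if
    // the required total pad is odd, the extra cell goes to the bottom/right
    // (pad_1). data_dims is NCHW, so the spatial dims start at index 2.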
    for (size_t i = 0; i < strides.size(); ++i) {
      int out_size = (data_dims[i + 2] + strides[i] - 1) / strides[i];
      int pad_sum =
          std::max((out_size - 1) * strides[i] + ksize[i] - data_dims[i + 2],
                   (int64_t)0);
      int pad_0 = pad_sum / 2;
      int pad_1 = pad_sum - pad_0;
      *(paddings->begin() + i * 2) = pad_0;
      *(paddings->begin() + i * 2 + 1) = pad_1;
    }
  } else if (padding_algorithm == "VALID") {
    // VALID: no padding at all.
    std::fill(paddings->begin(), paddings->end(), 0);
  }

  // If global_pooling or adaptive is set, the paddings are ignored and
  // reset to zero.
  if (global_pooling || adaptive) {
    std::fill(paddings->begin(), paddings->end(), 0);
  }
}
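// Worked example (illustrative): with an NCHW input of DDim({1, 3, 32, 32}),
// strides {2, 2}, ksize {3, 3}, and padding_algorithm "SAME":
//   out_size = (32 + 2 - 1) / 2 = 16
//   pad_sum  = max((16 - 1) * 2 + 3 - 32, 0) = 1
//   pad_0 = 0, pad_1 = 1
// so UpdatePadding rewrites paddings to {0, 1, 0, 1}.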

}  // namespace operators
}  // namespace lite
}  // namespace paddle