// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/operators/pool_op.h"
#include "lite/kernels/mlu/bridges/graph.h"
#include "lite/kernels/mlu/bridges/utility.h"
#include "lite/kernels/npu/bridges/registry.h"

namespace paddle {
namespace lite {
namespace subgraph {
namespace mlu {

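// Map the Paddle pooling_type attribute ("max" / "avg") to the
// corresponding CNML pool mode.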
inline cnmlPoolMode_t ToCnmlPoolMode(const std::string& pool_mode) {
  cnmlPoolMode_t cnml_pool_mode;
  if (pool_mode == "max") {
    cnml_pool_mode = CNML_POOL_MAX;
  } else if (pool_mode == "avg") {
    cnml_pool_mode = CNML_POOL_AVG;
  } else {
    CHECK(false) << "Unexpected pool mode " << pool_mode;
  }

  return cnml_pool_mode;
}

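// Convert a Paddle pool2d op into a CNML pooling op and fuse it into the
// MLU subgraph.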
int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) {
  CHECK(ctx != nullptr);
  CHECK(op != nullptr);
  auto graph = static_cast<Graph*>(ctx);
  auto op_info = op->op_info();
  auto op_type = op_info->Type();
  auto scope = op->scope();
  VLOG(3) << "[MLU] Converting " + op_type + "...";

  // Get input/output var names and op attributes
  auto x_var_name = op_info->Input("X").front();
  auto x = scope->FindTensor(x_var_name);
  auto output_var_name = op_info->Output("Out").front();
  auto output_shape = scope->FindTensor(output_var_name)->dims().Vectorize();
  auto pooling_type = op_info->GetAttr<std::string>("pooling_type");
  auto ceil_mode = op_info->GetAttr<bool>("ceil_mode");
  auto paddings = op_info->GetAttr<std::vector<int>>("paddings");
  auto global_pooling = op_info->GetAttr<bool>("global_pooling");
  auto ksize = op_info->GetAttr<std::vector<int>>("ksize");
  auto strides = op_info->GetAttr<std::vector<int>>("strides");
  // Only exclusive == true (padding excluded from average pooling) is
  // supported.
  CHECK(!(op_info->HasAttr("exclusive") &&
          op_info->GetAttr<bool>("exclusive") == false))
      << "Unsupported: pool2d with exclusive == false!";

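  // Expand a 2-element paddings vector [pad_h, pad_w] into the 4-element
  // form [pad_top, pad_bottom, pad_left, pad_right] by duplicating each
  // value.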
  if (paddings.size() == 2L) {
    for (size_t i = 0; i < 2L; ++i) {
      int copy_pad = *(paddings.begin() + 2 * i);
      paddings.insert(paddings.begin() + 2 * i + 1, copy_pad);
    }
  }
  std::string padding_algorithm("");
  if (op_info->HasAttr("padding_algorithm")) {
    padding_algorithm = op_info->GetAttr<std::string>("padding_algorithm");
  }
  bool adaptive = false;
  if (op_info->HasAttr("adaptive")) {
    adaptive = op_info->GetAttr<bool>("adaptive");
  }
  auto input_dims = x->dims();

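  // Recompute paddings according to the padding algorithm, global/adaptive
  // pooling flags, input shape, strides and kernel size.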
  lite::operators::UpdatePadding(&paddings,
                                 global_pooling,
                                 adaptive,
                                 padding_algorithm,
                                 input_dims,
                                 strides,
                                 ksize);

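  // For global pooling, the kernel spans the full spatial extent of the
  // input (dims 2..N-1 in NCHW layout).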
  if (global_pooling) {
    ksize.resize(static_cast<size_t>(input_dims.size()) - 2);
    for (size_t i = 0; i < ksize.size(); ++i) {
      ksize[i] = static_cast<int>(input_dims[i + 2]);
    }
  }

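  // Register the output tensor with the graph (NCHW layout, graph FP type).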
  auto output_tensor = graph->AddNode(
      output_var_name, output_shape, CNML_TENSOR, CNML_NCHW, graph->FPType());

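  // Assemble the pooling parameters: kernel size, strides, paddings
  // (top/bottom/left/right), unit dilations, pool mode, and kernel mode
  // (KFULL for ceil_mode, KVALID otherwise).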
  cnmlPoolOpParam_t pool_param;
  CNML_CALL(
      cnmlCreatePoolOpParam_V3(&pool_param,
                               ksize[0],
                               ksize[1],
                               strides[0],
                               strides[1],
                               paddings[0],
                               paddings[1],
                               paddings[2],
                               paddings[3],
                               1,  // dilation h
                               1,  // dilation w
                               ToCnmlPoolMode(pooling_type),
                               ceil_mode ? CNML_POOL_KFULL : CNML_POOL_KVALID,
                               true, /* real */
                               1 /* blend factor */));
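  // Create the pooling op, wiring the input node's MLU tensor to the new
  // output tensor.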
  cnmlBaseOp_t pool_op;
  CNML_CALL(cnmlCreatePoolOp(&pool_op,
                             pool_param,
                             graph->GetNode(x_var_name)->mlu_tensor(),
                             output_tensor->mlu_tensor()));
  CNML_CALL(cnmlDestroyPoolOpParam(&pool_param));
  graph->FuseOp(pool_op);
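  // The op has been fused into the graph, so the standalone op handle can
  // be released here.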
  CNML_CALL(cnmlDestroyBaseOp(&pool_op));
  return SUCCESS;
}

}  // namespace mlu
}  // namespace subgraph
}  // namespace lite
}  // namespace paddle

REGISTER_SUBGRAPH_BRIDGE(pool2d,
                         kMLU,
                         paddle::lite::subgraph::mlu::PoolConverter);