/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h"

namespace paddle {
namespace framework {
class Scope;

namespace proto {
class OpDesc;
}  // namespace proto
}  // namespace framework
}  // namespace paddle

namespace paddle {
namespace inference {
namespace tensorrt {

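// Computes the extra bottom/right padding needed to emulate Paddle's
// ceil_mode pooling with TensorRT's round-down output-size formula: when the
// ceil and floor output sizes differ along H or W, pad that side by
// stride - 1.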
inline void DealCeilMode(const nvinfer1::Dims &input_shape,
                         std::vector<int> ksize, std::vector<int> strides,
                         std::vector<int> paddings, nvinfer1::DimsHW *pre_pad,
                         nvinfer1::DimsHW *post_pad, int input_dims) {
  int input_height = input_shape.d[input_dims - 2];
  int input_width = input_shape.d[input_dims - 1];
  int floor_h_output_size =
      (input_height - ksize[0] + 2 * paddings[0]) / strides[0] + 1;
  int ceil_h_output_size =
      (input_height - ksize[0] + 2 * paddings[0] + strides[0] - 1) /
          strides[0] +
      1;

  int floor_w_output_size =
      (input_width - ksize[1] + 2 * paddings[1]) / strides[1] + 1;
  int ceil_w_output_size =
      (input_width - ksize[1] + 2 * paddings[1] + strides[1] - 1) / strides[1] +
      1;
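  // Illustrative example (assumed values): input = 6, ksize = 3, padding = 0,
  // stride = 2 gives floor size (6 - 3) / 2 + 1 = 2 but ceil size
  // (6 - 3 + 1) / 2 + 1 = 3, so the stride - 1 = 1 extra row/column of
  // post-padding recovers the extra window.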
  if (floor_h_output_size != ceil_h_output_size) {
    post_pad->h() = strides[0] - 1;
  }

  if (floor_w_output_size != ceil_w_output_size) {
    post_pad->w() = strides[1] - 1;
  }
}

/*
 * Pool2dOp, IPoolingLayer in TRT. This Layer doesn't have weights.
 */
class Pool2dOpConverter : public OpConverter {
 public:
  void operator()(const framework::proto::OpDesc &op,
                  const framework::Scope &scope, bool test_mode) override {
    VLOG(4)
        << "convert a fluid pool2d op to tensorrt pool2d layer without bias";
    framework::OpDesc op_desc(op, nullptr);
    auto *input1 = engine_->GetITensor(op_desc.Input("X")[0]);
    nvinfer1::Dims input_shape = input1->getDimensions();
    int input_dims = input_shape.nbDims;

    bool global_pooling =
        BOOST_GET_CONST(bool, op_desc.GetAttr("global_pooling"));
    std::string pool_type =
        BOOST_GET_CONST(std::string, op_desc.GetAttr("pooling_type"));
    std::vector<int> ksize =
        BOOST_GET_CONST(std::vector<int>, op_desc.GetAttr("ksize"));
    std::vector<int> strides =
        BOOST_GET_CONST(std::vector<int>, op_desc.GetAttr("strides"));
    std::vector<int> paddings =
        BOOST_GET_CONST(std::vector<int>, op_desc.GetAttr("paddings"));
    bool exclusive = op_desc.HasAttr("exclusive")
                         ? BOOST_GET_CONST(bool, op_desc.GetAttr("exclusive"))
                         : true;
    bool ceil_mode = BOOST_GET_CONST(bool, op_desc.GetAttr("ceil_mode"));
    bool adaptive = false;
    if (op_desc.HasAttr("adaptive"))
      adaptive = BOOST_GET_CONST(bool, op_desc.GetAttr("adaptive"));
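    // padding_algorithm: "EXPLICIT" uses the paddings attribute as-is;
    // "SAME" and "VALID" recompute or zero out the paddings below.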
    std::string padding_algorithm = "EXPLICIT";
    if (op_desc.HasAttr("padding_algorithm"))
      padding_algorithm =
          BOOST_GET_CONST(std::string, op_desc.GetAttr("padding_algorithm"));

    nvinfer1::PoolingType nv_pool_type = nvinfer1::PoolingType::kMAX;
    nvinfer1::ReduceOperation reduce_operation =
        nvinfer1::ReduceOperation::kMAX;
    plugin::PoolPlugin::PoolType plugin_pool_type =
        plugin::PoolPlugin::PoolType::max;
    if (pool_type == "max") {
      nv_pool_type = nvinfer1::PoolingType::kMAX;
      reduce_operation = nvinfer1::ReduceOperation::kMAX;
      plugin_pool_type = plugin::PoolPlugin::PoolType::max;
    } else if (pool_type == "avg") {
      nv_pool_type = nvinfer1::PoolingType::kAVERAGE;
      reduce_operation = nvinfer1::ReduceOperation::kAVG;
      plugin_pool_type = plugin::PoolPlugin::PoolType::avg;
    }
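    // Global and adaptive pooling cover the whole spatial extent, so explicit
    // paddings are dropped.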
    if (global_pooling || adaptive) {
      std::fill(paddings.begin(), paddings.end(), 0);
    }

    if (padding_algorithm == "VALID") {
      std::fill(paddings.begin(), paddings.end(), 0);
    }
    nvinfer1::DimsHW nv_ksize(ksize[0], ksize[1]);
    nvinfer1::DimsHW nv_strides(strides[0], strides[1]);
    nvinfer1::DimsHW nv_paddings(paddings[0], paddings[1]);

    nvinfer1::ILayer *layer = nullptr;
    nvinfer1::DimsHW g_pre_pad(0, 0);
    nvinfer1::DimsHW g_post_pad(0, 0);
    // Paddle non-ceil_mode: output size =
    //   (input size - filter size + 2 * padding) / stride + 1
    // TensorRT kEXPLICIT_ROUND_DOWN: O = floor((M - DK) / S) + 1, where M is
    // the padded input size, so if M - DK < 0 we need extra post-padding.
    if (input_shape.d[input_dims - 2] - ksize[0] + 2 * paddings[0] < 0) {
      g_post_pad.h() = strides[0] - 1;
    }
    if (input_shape.d[input_dims - 1] - ksize[1] + 2 * paddings[1] < 0) {
      g_post_pad.w() = strides[1] - 1;
    }

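    // int8 path: register the input tensor's quantization scale so the engine
    // can set its dynamic range.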
    if (op_desc.HasAttr("enable_int8")) {
      CHECK(op_desc.HasAttr("Input_scale"));
      float input_scale =
          BOOST_GET_CONST(float, op_desc.GetAttr("Input_scale"));
      engine_->SetTensorDynamicRange(input1, input_scale);
    }

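    // Expand the 2-element symmetric paddings [h, w] into the 4-element
    // [h_before, h_after, w_before, w_after] form passed to the pool plugin.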
    std::vector<int> real_paddings = paddings;
    for (int i = 0; i < 2; ++i) {
      int copy_pad = *(paddings.begin() + i);
      real_paddings.insert(real_paddings.begin() + 2 * i + 1, copy_pad);
    }
    // SAME
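    // e.g. (illustrative numbers): input = 5, stride = 2, ksize = 4 gives
    // out_size = ceil(5 / 2) = 3, pad_sum = max((3 - 1) * 2 + 4 - 5, 0) = 3,
    // split as pad_0 = 1 (top/left) and pad_1 = 2 (bottom/right).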
    if (padding_algorithm == "SAME") {
      // expand
      for (int i = 0; i < 2; ++i) {
        int copy_pad = *(paddings.begin() + 2 * i);
        paddings.insert(paddings.begin() + 2 * i + 1, copy_pad);
      }
      // compute
      for (int i = 0; i < 2; ++i) {
        int out_size = (input_shape.d[2 + i] + strides[i] - 1) / strides[i];
        int pad_sum = std::max(
            (out_size - 1) * strides[i] + ksize[i] - input_shape.d[2 + i], 0);
        int pad_0 = pad_sum / 2;
        int pad_1 = pad_sum - pad_0;
        paddings[i * 2] = pad_0;
        paddings[i * 2 + 1] = pad_1;
      }
      real_paddings = paddings;
      // slice
      for (int i = 0; i < 2; ++i) {
        paddings.erase(paddings.begin() + i + 1);
      }
    }
    // VALID
    if (padding_algorithm == "VALID") {
      std::fill(real_paddings.begin(), real_paddings.end(), 0);
    }

    if (global_pooling && !engine_->with_dynamic_shape()) {
      nv_ksize.d[0] = input_shape.d[input_dims - 2];
      nv_ksize.d[1] = input_shape.d[input_dims - 1];
      ksize[0] = input_shape.d[input_dims - 2];
      ksize[1] = input_shape.d[input_dims - 1];
    }

    if (engine_->with_dynamic_shape()) {
      if (!adaptive && !global_pooling && !ceil_mode) {
        // input_shape.d < 0 means the shape is unknown at conversion time;
        // we may hit issues if the actual runtime shape differs.
        if ((padding_algorithm != "SAME") &&
            ((g_post_pad.w() > 0 && input_shape.d[input_dims - 1] > 0) ||
             (g_post_pad.h() > 0 && input_shape.d[input_dims - 2] > 0))) {
          auto *pad_layer = TRT_ENGINE_ADD_LAYER(engine_, Padding, *input1,
                                                 g_pre_pad, g_post_pad);
          PADDLE_ENFORCE_NOT_NULL(
              pad_layer, platform::errors::Fatal(
                             "Pad layer in poolOp converter could not be "
                             "created. The pointer to pad layer is `NULL`."));
          input1 = pad_layer->getOutput(0);
        }

        auto *pool_layer = TRT_ENGINE_ADD_LAYER(engine_, Pooling, *input1,
                                                nv_pool_type, nv_ksize);
        pool_layer->setStride(nv_strides);
        pool_layer->setPadding(nv_paddings);
        pool_layer->setAverageCountExcludesPadding(exclusive);
        if (padding_algorithm == "SAME") {
          pool_layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
        }
        layer = pool_layer;
      } else if (!adaptive && !global_pooling && ceil_mode) {
        auto *pool_layer = TRT_ENGINE_ADD_LAYER(engine_, Pooling, *input1,
                                                nv_pool_type, nv_ksize);
        pool_layer->setStride(nv_strides);
        pool_layer->setPadding(nv_paddings);
        pool_layer->setAverageCountExcludesPadding(exclusive);
        if (padding_algorithm == "SAME") {
          pool_layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
        } else {
          pool_layer->setPaddingMode(nvinfer1::PaddingMode::kEXPLICIT_ROUND_UP);
        }
        layer = pool_layer;
      } else if (global_pooling && !adaptive) {
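        // Global pooling as a Reduce over the spatial axes: the axes bitmask
        // 12 (0b1100) selects dims 2 and 3 (H, W) of the NCHW input, and
        // keepDims = true preserves the 4-D output rank.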
        auto *reduce_layer = TRT_ENGINE_ADD_LAYER(engine_, Reduce, *input1,
                                                  reduce_operation, 12, true);
        layer = reduce_layer;
      } else {
#if IS_TRT_VERSION_GE(6000)
        plugin::PoolPluginDynamic *plugin = new plugin::PoolPluginDynamic(
            ceil_mode, pool_type, adaptive, exclusive, ksize, strides, paddings,
            global_pooling);
        layer = engine_->AddDynamicPlugin(&input1, 1, plugin);
#endif
      }
      auto output_name = op_desc.Output("Out")[0];
      layer->setName(("pool2d (Output: " + output_name + ")").c_str());
      layer->getOutput(0)->setName(output_name.c_str());
      engine_->SetITensor(output_name, layer->getOutput(0));
      if (test_mode) {
        engine_->DeclareOutput(output_name);
      }
      return;
    }

    if (global_pooling && !adaptive) {
      auto *pool_layer = TRT_ENGINE_ADD_LAYER(engine_, Pooling, *input1,
                                              nv_pool_type, nv_ksize);
      PADDLE_ENFORCE_NOT_NULL(
          pool_layer, platform::errors::Fatal(
                          "trt pool layer in converter could not be created."));
      auto output_name = op_desc.Output("Out")[0];
      pool_layer->setName(("pool2d (Output: " + output_name + ")").c_str());
      pool_layer->getOutput(0)->setName(output_name.c_str());
      engine_->SetITensor(output_name, pool_layer->getOutput(0));
      layer = pool_layer;
      if (test_mode) {
        engine_->DeclareOutput(output_name);
      }
      return;
    }

    if (!adaptive) {
      if (ceil_mode) {
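        // The pad-then-pool emulation below matches ceil_mode only when the
        // kernel size is a multiple of the stride; otherwise fall back to
        // PoolPlugin, which computes ceil_mode directly.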
        if (nv_ksize.d[0] % nv_strides.d[0] == 0 &&
            nv_ksize.d[1] % nv_strides.d[1] == 0) {
          nvinfer1::DimsHW pre_pad(0, 0);
          nvinfer1::DimsHW post_pad(0, 0);
          // If ceil_mode is true, pad the input so that TRT's round-down
          // pooling produces the ceil-mode output size.
          DealCeilMode(input_shape, ksize, strides, paddings, &pre_pad,
                       &post_pad, input_dims);
          auto *pad_layer = TRT_ENGINE_ADD_LAYER(engine_, Padding, *input1,
                                                 pre_pad, post_pad);

          PADDLE_ENFORCE_NOT_NULL(
              pad_layer, platform::errors::Fatal(
                             "Pad layer in poolOp converter could not be "
                             "created. The pointer to pad layer is `NULL`."));
          input1 = pad_layer->getOutput(0);

          auto *pool_layer = TRT_ENGINE_ADD_LAYER(engine_, Pooling, *input1,
                                                  nv_pool_type, nv_ksize);
          PADDLE_ENFORCE_NOT_NULL(
              pool_layer,
              platform::errors::Fatal(
                  "trt pool layer in converter could not be created."));
          pool_layer->setStride(nv_strides);
          pool_layer->setPadding(nv_paddings);
          if (padding_algorithm == "SAME") {
            pool_layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
          }
          pool_layer->setAverageCountExcludesPadding(exclusive);
          layer = pool_layer;
        } else {
          std::vector<int> input_shape_v;
          for (int i = 0; i < input_dims; i++) {
            input_shape_v.push_back(input_shape.d[i]);
          }
          plugin::PoolPlugin *plugin = new plugin::PoolPlugin(
              ceil_mode, plugin_pool_type, adaptive, exclusive, ksize, strides,
              paddings, input_shape_v, real_paddings);
          auto *pool_layer = engine_->AddPlugin(&input1, 1, plugin);
          PADDLE_ENFORCE_NOT_NULL(
              pool_layer,
              platform::errors::Fatal(
                  "trt pool plugin layer in converter could not be created."));
          layer = pool_layer;
        }
      } else {
#if IS_TRT_VERSION_GE(8000)
        // Excluding padding pixels from the average mean is not well
        // supported by TRT, so apply explicit padding for TRT 8.0 and above.
        if ((g_post_pad.w() > 0 || g_post_pad.h() > 0) &&
            (padding_algorithm != "SAME") && !ceil_mode) {
          auto *pad_layer = TRT_ENGINE_ADD_LAYER(engine_, Padding, *input1,
                                                 g_pre_pad, g_post_pad);
          PADDLE_ENFORCE_NOT_NULL(
              pad_layer, platform::errors::Fatal(
                             "Pad layer in poolOp converter could not be "
                             "created. The pointer to pad layer is `NULL`."));
          input1 = pad_layer->getOutput(0);
        }
#endif
        auto *pool_layer = TRT_ENGINE_ADD_LAYER(engine_, Pooling, *input1,
                                                nv_pool_type, nv_ksize);
        PADDLE_ENFORCE_NOT_NULL(
            pool_layer,
            platform::errors::Fatal(
                "trt pool layer in converter could not be created."));
        pool_layer->setStride(nv_strides);
        pool_layer->setPadding(nv_paddings);
        if (padding_algorithm == "SAME") {
          pool_layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
        }
        pool_layer->setAverageCountExcludesPadding(exclusive);
        layer = pool_layer;
      }
    } else {
      // Average pooling needs to exclude the padding pixels from the average
      // mean. TRT does not support this well, so we use a plugin here.
      std::vector<int> input_shape_v;
      for (int i = 0; i < input_dims; i++) {
        input_shape_v.push_back(input_shape.d[i]);
      }
      plugin::PoolPlugin *plugin = new plugin::PoolPlugin(
          ceil_mode, plugin_pool_type, adaptive, exclusive, ksize, strides,
          paddings, input_shape_v, real_paddings);
      auto *pool_layer = engine_->AddPlugin(&input1, 1, plugin);
      PADDLE_ENFORCE_NOT_NULL(
          pool_layer,
          platform::errors::Fatal(
              "trt pool plugin layer in converter could not be created."));
      layer = pool_layer;
    }
    auto output_name = op_desc.Output("Out")[0];
    RreplenishLayerAndOutput(layer, "pool2d", {output_name}, test_mode);
  }
};

}  // namespace tensorrt
}  // namespace inference
}  // namespace paddle

REGISTER_TRT_OP_CONVERTER(pool2d, Pool2dOpConverter);