/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h"
#include "paddle/phi/kernels/funcs/pooling.h"

namespace paddle {
namespace operators {

namespace {

cnnlPoolingMode_t ToCnnlPoolingMode(const std::string &pooling_type,
                                    bool exclusive,
                                    bool adaptive) {
  cnnlPoolingMode_t pooling_mode;
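  // "max" maps directly onto CNNL_POOLING_MAX. For "avg", padded elements
  // are excluded from the averaging divisor only when `exclusive` is set and
  // the pooling is not adaptive.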
  if (pooling_type == "max") {
    pooling_mode = CNNL_POOLING_MAX;
  } else if (pooling_type == "avg") {
    if (exclusive && !adaptive) {
      pooling_mode = CNNL_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
    } else {
      pooling_mode = CNNL_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
    }
  } else {
    PADDLE_THROW(platform::errors::InvalidArgument("Unknown pooling_type: %s",
                                                   pooling_type));
  }
  return pooling_mode;
}
}  // namespace

template <typename T>
class MLUPoolOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto &dev_ctx = ctx.template device_context<platform::MLUDeviceContext>();
    const Tensor *in_x = ctx.Input<Tensor>("X");
    Tensor *out = ctx.Output<Tensor>("Out");
    out->mutable_data<T>(ctx.GetPlace());

    std::string pooling_type = ctx.Attr<std::string>("pooling_type");
    std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
    std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    std::string data_format = ctx.Attr<std::string>("data_format");

    bool global_pooling = ctx.Attr<bool>("global_pooling");
    bool ceil_mode = ctx.Attr<bool>("ceil_mode");
    bool exclusive = ctx.Attr<bool>("exclusive");
    bool adaptive = ctx.Attr<bool>("adaptive");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");

    PADDLE_ENFORCE_EQ(in_x->dims().size(),
                      4,
                      platform::errors::InvalidArgument(
                          "Only 4-D Tensor is supported by the mlu pool2d "
                          "kernel."));

    const bool channel_last = data_format == "NHWC";
    // Default to NCHW layout; the layout and the out_h/out_w index positions
    // are adjusted below for NHWC inputs.
    cnnlTensorLayout_t cnnl_layout = CNNL_LAYOUT_NCHW;
    auto out_dims = out->dims();
    int64_t out_h = out_dims[2];
    int64_t out_w = out_dims[3];
    auto in_x_dims = in_x->dims();
    framework::DDim data_dims = phi::slice_ddim(in_x_dims, 2, in_x_dims.size());

    if (channel_last) {
      cnnl_layout = CNNL_LAYOUT_NHWC;
      out_h = out_dims[1];
      out_w = out_dims[2];
      data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
    }

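    // Normalize paddings for the chosen padding_algorithm ("SAME"/"VALID"
    // fill in explicit values); UpdatePadding also zeroes the paddings for
    // global or adaptive pooling.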
    phi::funcs::UpdatePadding(&paddings,
                              global_pooling,
                              adaptive,
                              padding_algorithm,
                              data_dims,
                              strides,
                              ksize);
    if (global_pooling) {
      phi::funcs::UpdateKernelSize(&ksize, data_dims);
    }

    MLUCnnlTensorDesc in_x_desc(*in_x, cnnl_layout, ToCnnlDataType<T>());
    MLUCnnlTensorDesc out_desc(*out, cnnl_layout, ToCnnlDataType<T>());

    cnnlPoolingMode_t pool_mode =
        ToCnnlPoolingMode(pooling_type, exclusive, adaptive);

    // Transpose NCHW inputs to NHWC, since cnnl pool2d performs worse in the
    // NCHW layout.
    framework::Tensor trans_in_x;
    framework::Tensor trans_out;
    if (channel_last) {
      trans_in_x = *in_x;
      trans_out = *out;
    } else {
      std::vector<int> perm{0, 2, 3, 1};
      TransposeFromMLUTensor<T>(
          ctx, perm, in_x, &trans_in_x, true /*need_reshape_or_alloc*/);
      trans_out = ctx.AllocateTmpTensor<T, MLUDeviceContext>(
          {out_dims[0], out_dims[2], out_dims[3], out_dims[1]}, dev_ctx);
    }
    MLUCnnlTensorDesc trans_in_x_desc(
        trans_in_x, CNNL_LAYOUT_NHWC, ToCnnlDataType<T>());
    MLUCnnlTensorDesc trans_out_desc(
        trans_out, CNNL_LAYOUT_NHWC, ToCnnlDataType<T>());

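    // Non-adaptive pooling runs the regular CNNL pooling op driven by an
    // explicit pooling descriptor; adaptive pooling uses a dedicated CNNL op.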
    if (!adaptive) {
      MLUCnnlPoolingDesc pool_desc(pool_mode,
                                   CNNL_NOT_PROPAGATE_NAN,
                                   ksize[0],
                                   ksize[1],
                                   paddings[0],
                                   paddings[1],
                                   paddings[2],
                                   paddings[3],
                                   strides[0],
                                   strides[1],
                                   1 /*row_dilation*/,
                                   1 /*col_dilation*/,
                                   ceil_mode);

      size_t extra_input_size = 0;
      cnnlHandle_t handle =
          ctx.template device_context<MLUDeviceContext>().cnnl_handle();
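      // Some CNNL pooling configurations require an extra input blob that is
      // initialized on the host from the pooling descriptor and then copied
      // to the device; query its size first.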
      cnnlGetPoolingExtraInputSize(
          handle, pool_mode, out_w, out_h, &extra_input_size);

      if (extra_input_size > 0) {
        phi::CPUContext cpu_ctx;
        framework::Tensor extra_host_tensor =
            ctx.AllocateTmpTensor<int8_t, phi::CPUContext>(
                {static_cast<int64_t>(extra_input_size)}, cpu_ctx);
        cnnlInitPoolingExtraInput(handle,
                                  pool_desc.get(),
                                  trans_in_x_desc.get(),
                                  trans_out_desc.get(),
                                  GetBasePtr(&extra_host_tensor));
        framework::Tensor extra_device_tensor =
            ctx.AllocateTmpTensor<int8_t, MLUDeviceContext>(
                {static_cast<int64_t>(extra_input_size)}, dev_ctx);
        framework::TensorCopy(
            extra_host_tensor, ctx.GetPlace(), &extra_device_tensor);
        // Capture extra_host_tensor by value in the stream callback so its
        // holder_ stays referenced until the async copy to device completes.
        auto increase_ref_count = [extra_host_tensor]() {
          VLOG(4) << "Finished copying extra_host_tensor["
                  << GetBasePtr(&extra_host_tensor)
                  << "] in mlu pooling kernel.";
        };
        dev_ctx.AddStreamCallback(increase_ref_count);
        MLUCnnl::PoolingForward(
            ctx,
            pool_mode,
            out_h,
            out_w,
            pool_desc.get(),
            nullptr /*alpha*/,
            trans_in_x_desc.get(),
            GetBasePtr(&trans_in_x),
            nullptr /*beta*/,
            GetBasePtr(&extra_device_tensor) /*params_shape_ptr*/,
            trans_out_desc.get(),
            GetBasePtr(&trans_out));
      } else {
        MLUCnnl::PoolingForward(ctx,
                                pool_mode,
                                out_h,
                                out_w,
                                pool_desc.get(),
                                nullptr /*alpha*/,
                                trans_in_x_desc.get(),
                                GetBasePtr(&trans_in_x),
                                nullptr /*beta*/,
                                nullptr /*params_shape_ptr*/,
                                trans_out_desc.get(),
                                GetBasePtr(&trans_out));
      }
    } else {
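      // Adaptive pooling derives each pooling region from the input and
      // output spatial sizes, so no pooling descriptor is passed.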
      MLUCnnl::AdaptivePoolingForward(ctx,
                                      pool_mode,
                                      trans_in_x_desc.get(),
                                      GetBasePtr(&trans_in_x),
                                      trans_out_desc.get(),
                                      GetBasePtr(&trans_out),
                                      nullptr,
                                      nullptr);
    }
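    // Transpose the NHWC result back to the kernel's NCHW output tensor.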
    if (!channel_last) {
      std::vector<int> perm{0, 3, 1, 2};
      TransposeFromMLUTensor<T>(
          ctx, perm, &trans_out, out, false /*need_reshape_or_alloc*/);
    }
  }
};

template <typename T, typename IDX_T>
class MLUPoolGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto &dev_ctx = ctx.template device_context<platform::MLUDeviceContext>();
    const Tensor *in_x = ctx.Input<Tensor>("X");
    const Tensor *out = ctx.Input<Tensor>("Out");
    const Tensor *out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    Tensor *in_x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    in_x_grad->mutable_data<T>(ctx.GetPlace());

    std::string pooling_type = ctx.Attr<std::string>("pooling_type");
    std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
    std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    bool ceil_mode = ctx.Attr<bool>("ceil_mode");
    bool exclusive = ctx.Attr<bool>("exclusive");
    bool adaptive = ctx.Attr<bool>("adaptive");
    std::string data_format = ctx.Attr<std::string>("data_format");
    bool global_pooling = ctx.Attr<bool>("global_pooling");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");

    const bool channel_last = data_format == "NHWC";

    auto in_x_dims = in_x->dims();
    framework::DDim data_dims = phi::slice_ddim(in_x_dims, 2, in_x_dims.size());
    if (channel_last) {
      data_dims = phi::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
    }

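    // Same paddings/ksize normalization as in the forward kernel.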
    phi::funcs::UpdatePadding(&paddings,
                              global_pooling,
                              adaptive,
                              padding_algorithm,
                              data_dims,
                              strides,
                              ksize);
    if (global_pooling) {
      phi::funcs::UpdateKernelSize(&ksize, data_dims);
    }

    // cnnl pooling backward expects NHWC tensors, so transpose NCHW inputs.
    framework::Tensor trans_in_x;
    framework::Tensor trans_out;
    framework::Tensor trans_out_grad;
    framework::Tensor trans_in_x_grad;
    if (channel_last) {
      trans_in_x = *in_x;
      trans_out = *out;
      trans_out_grad = *out_grad;
      trans_in_x_grad = *in_x_grad;
    } else {
      std::vector<int> perm{0, 2, 3, 1};
      TransposeFromMLUTensor<T>(
          ctx, perm, in_x, &trans_in_x, true /*need_reshape_or_alloc*/);
      TransposeFromMLUTensor<T>(
          ctx, perm, out, &trans_out, true /*need_reshape_or_alloc*/);
      TransposeFromMLUTensor<T>(
          ctx, perm, out_grad, &trans_out_grad, true /*need_reshape_or_alloc*/);
      auto in_x_grad_dims = in_x_grad->dims();
      trans_in_x_grad =
          ctx.AllocateTmpTensor<T, MLUDeviceContext>({in_x_grad_dims[0],
                                                      in_x_grad_dims[2],
                                                      in_x_grad_dims[3],
                                                      in_x_grad_dims[1]},
                                                     dev_ctx);
    }
    MLUCnnlTensorDesc trans_in_x_desc(
        trans_in_x, CNNL_LAYOUT_NHWC, ToCnnlDataType<T>());
    MLUCnnlTensorDesc trans_out_desc(
        trans_out, CNNL_LAYOUT_NHWC, ToCnnlDataType<T>());
    MLUCnnlTensorDesc trans_out_grad_desc(
        trans_out_grad, CNNL_LAYOUT_NHWC, ToCnnlDataType<T>());
    MLUCnnlTensorDesc trans_in_x_grad_desc(
        trans_in_x_grad, CNNL_LAYOUT_NHWC, ToCnnlDataType<T>());

    cnnlPoolingMode_t pool_mode =
        ToCnnlPoolingMode(pooling_type, exclusive, adaptive);
    MLUCnnlPoolingDesc pool_desc(pool_mode,
                                 CNNL_NOT_PROPAGATE_NAN,
                                 ksize[0],
                                 ksize[1],
                                 paddings[0],
                                 paddings[1],
                                 paddings[2],
                                 paddings[3],
                                 strides[0],
                                 strides[1],
                                 1 /*row_dilation*/,
                                 1 /*col_dilation*/,
                                 ceil_mode);

    if (pooling_type == "max") {
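      // Max pooling backward needs the forward argmax indices, which the
      // forward kernel does not keep, so recompute them with PoolingIndex.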
      framework::Tensor index_tensor =
          ctx.AllocateTmpTensor<IDX_T, MLUDeviceContext>(trans_out_grad.dims(),
                                                         dev_ctx);
      MLUCnnlTensorDesc index_tensor_desc(
          index_tensor, CNNL_LAYOUT_NHWC, ToCnnlDataType<IDX_T>());
      MLUCnnl::PoolingIndex(ctx,
                            pool_desc.get(),
                            trans_in_x_desc.get(),
                            GetBasePtr(&trans_in_x),
                            index_tensor_desc.get(),
                            GetBasePtr(&index_tensor));
      if (adaptive) {
        MLUCnnl::AdaptivePoolingBackward(ctx,
                                         pool_mode,
                                         trans_out_grad_desc.get(),
                                         GetBasePtr(&trans_out_grad),
                                         index_tensor_desc.get(),
                                         GetBasePtr(&index_tensor),
                                         trans_in_x_grad_desc.get(),
                                         GetBasePtr(&trans_in_x_grad));
      } else {
        MLUCnnl::PoolingBackward(ctx,
                                 pool_desc.get(),
                                 nullptr /*alpha*/,
                                 index_tensor_desc.get(),
                                 GetBasePtr(&index_tensor),
                                 trans_out_grad_desc.get(),
                                 GetBasePtr(&trans_out_grad),
                                 trans_in_x_desc.get(),
                                 GetBasePtr(&trans_in_x),
                                 nullptr /*beta*/,
                                 trans_in_x_grad_desc.get(),
                                 GetBasePtr(&trans_in_x_grad));
      }
    } else {
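      // Average pooling backward needs no argmax indices.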
      if (adaptive) {
        MLUCnnl::AdaptivePoolingBackward(ctx,
                                         pool_mode,
                                         trans_out_grad_desc.get(),
                                         GetBasePtr(&trans_out_grad),
                                         nullptr /*index_tensor_desc.get()*/,
                                         nullptr /*GetBasePtr(&index_tensor)*/,
                                         trans_in_x_grad_desc.get(),
                                         GetBasePtr(&trans_in_x_grad));
      } else {
        MLUCnnl::PoolingBackward(ctx,
                                 pool_desc.get(),
                                 nullptr /*alpha*/,
                                 nullptr /*index_desc*/,
                                 nullptr /*index*/,
                                 trans_out_grad_desc.get(),
                                 GetBasePtr(&trans_out_grad),
                                 nullptr /*in_x_desc*/,
                                 nullptr /*in_x*/,
                                 nullptr /*beta*/,
                                 trans_in_x_grad_desc.get(),
                                 GetBasePtr(&trans_in_x_grad));
      }
    }
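    // Transpose the NHWC gradient back to the kernel's NCHW output tensor.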
    if (!channel_last) {
      std::vector<int> perm{0, 3, 1, 2};
      TransposeFromMLUTensor<T>(ctx,
                                perm,
                                &trans_in_x_grad,
                                in_x_grad,
                                false /*need_reshape_or_alloc*/);
    }
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;
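// pool2d on MLU is registered for float32 and float16. Note the index dtype
// paired with each grad kernel: int32 indices for float32 and int16 indices
// for float16.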
REGISTER_OP_MLU_KERNEL(pool2d,
                       ops::MLUPoolOpKernel<float>,
                       ops::MLUPoolOpKernel<plat::float16>);
REGISTER_OP_MLU_KERNEL(pool2d_grad,
                       ops::MLUPoolGradOpKernel<float, int>,
                       ops::MLUPoolGradOpKernel<plat::float16, int16_t>);