/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef POOL_OP

#include "operators/kernel/pool_kernel.h"

namespace paddle_mobile {
namespace operators {

template <>
bool PoolKernel<GPU_CL, float>::Init(PoolParam<GPU_CL> *param) {
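  // Register the OpenCL kernel "pool_" + pooling_type (e.g. "pool_max" or
  // "pool_avg") from pool_kernel.cl with the CL helper.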
  std::string pooling_type = param->PoolingType();
  this->cl_helper_.AddKernel("pool_" + pooling_type, "pool_kernel.cl");
  return true;
}

template <>
void PoolKernel<GPU_CL, float>::Compute(const PoolParam<GPU_CL> &param) {
  auto kernel = this->cl_helper_.KernelAt(0);
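  // Default 3-D global work size computed from the output image.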
  auto default_work_size = this->cl_helper_.DefaultWorkSize(*param.Output());

  auto input = param.Input()->GetCLImage();
  auto out = param.Output()->GetCLImage();

  const int in_height = param.Input()->HeightOfOneBlock();
  const int in_width = param.Input()->WidthOfOneBlock();
  const int out_height = param.Output()->HeightOfOneBlock();
  const int out_width = param.Output()->WidthOfOneBlock();

  std::string pooling_type = param.PoolingType();
  std::vector<int> ksize = param.Ksize();
  std::vector<int> strides = param.Strides();
  std::vector<int> paddings = param.Paddings();
  const int pad_top = paddings[0];
  const int pad_left = paddings[1];
  const int stride_h = strides[0];
  const int stride_w = strides[1];
  const int ksize_h = ksize[0];
  const int ksize_w = ksize[1];

  // Kernel args: input/output dims, padding, strides, window size, then images.
  clSetKernelArg(kernel, 0, sizeof(cl_int), &in_height);
  clSetKernelArg(kernel, 1, sizeof(cl_int), &in_width);
  clSetKernelArg(kernel, 2, sizeof(cl_int), &out_height);
  clSetKernelArg(kernel, 3, sizeof(cl_int), &out_width);
  clSetKernelArg(kernel, 4, sizeof(cl_int), &pad_top);
  clSetKernelArg(kernel, 5, sizeof(cl_int), &pad_left);
  clSetKernelArg(kernel, 6, sizeof(cl_int), &stride_h);
  clSetKernelArg(kernel, 7, sizeof(cl_int), &stride_w);
  clSetKernelArg(kernel, 8, sizeof(cl_int), &ksize_h);
  clSetKernelArg(kernel, 9, sizeof(cl_int), &ksize_w);
  clSetKernelArg(kernel, 10, sizeof(cl_mem), &input);
  clSetKernelArg(kernel, 11, sizeof(cl_mem), &out);

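  // Wait on the input image's event before launching; the enqueue below
  // records its completion event in out_event.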
  cl_event out_event = param.Output()->GetClEvent();
  cl_event wait_event = param.Input()->GetClEvent();
  clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel, 3, NULL,
                         default_work_size.data(), NULL, 1, &wait_event,
                         &out_event);
}

template class PoolKernel<GPU_CL, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif