/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef FUSION_CONVADD_OP

#include "operators/kernel/conv_add_kernel.h"

namespace paddle_mobile {
namespace operators {

template <>
bool ConvAddKernel<GPU_CL, float>::Init(FusionConvAddParam<GPU_CL> *param) {
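  // The fused conv+add CL kernels assume a square filter and symmetric
  // padding, so reject anything else up front.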
  PADDLE_MOBILE_ENFORCE(
      param->Filter()->dims()[2] == param->Filter()->dims()[3] &&
          param->Paddings()[0] == param->Paddings()[1],
      "need equal");
  param->Filter()->InitCLImage(cl_helper_.CLContext(),
                               this->cl_helper_.CLCommandQueue());
  param->Bias()->InitCLImage(cl_helper_.CLContext(),
                             this->cl_helper_.CLCommandQueue());

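  // offset = filter_size / 2 - padding, forwarded to the .cl kernels in
  // Compute() so they can align the filter window with the padded input.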
  int offset = static_cast<int>(param->Filter()->dims()[2]) / 2 -
               static_cast<int>(param->Paddings()[1]);
  param->SetOffset(offset);

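  // Pick the CL kernel variant from the filter geometry: 1x1 pointwise,
  // depthwise 3x3 (filter channel dim == 1), or general 3x3.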
  if (param->Filter()->WidthOfOneBlock() == 1 &&
      param->Filter()->HeightOfOneBlock() == 1) {
    this->cl_helper_.AddKernel("conv_1x1", "conv_add_kernel.cl");
  } else if (param->Filter()->dims()[1] == 1) {
    this->cl_helper_.AddKernel("depth_conv_3x3", "conv_add_kernel.cl");
  } else if (param->Filter()->WidthOfOneBlock() == 3 &&
             param->Filter()->HeightOfOneBlock() == 3) {
    this->cl_helper_.AddKernel("conv_3x3", "conv_add_kernel.cl");
  } else {
    PADDLE_MOBILE_THROW_EXCEPTION("unsupported filter shape");
  }

  return true;
}

template <>
void ConvAddKernel<GPU_CL, float>::Compute(
    const FusionConvAddParam<GPU_CL> &param) {
  auto kernel = this->cl_helper_.KernelAt(0);
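  // DefaultWorkSize gives {c_block, w, n*h} for the output image; the same
  // values are also passed to the kernel as scalar arguments.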
  auto default_work_size = this->cl_helper_.DefaultWorkSize(*param.Output());
  int c_block = default_work_size[0];
  int w = default_work_size[1];
  int nh = default_work_size[2];
  auto input = param.Input()->GetCLImage();
  auto filter = param.Filter()->GetCLImage();
  auto biase = param.Bias()->GetCLImage();
  auto output = param.Output()->GetCLImage();
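  // Only stride[0] and dilation[0] are forwarded: strides and dilations are
  // assumed to be symmetric here.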
  int stride = param.Strides()[0];
  int offset = param.Offset();
  int input_c = param.Input()->CBlock();
  int dilation = param.Dilations()[0];
  int input_width = param.Input()->WidthOfOneBlock();
  int input_height = param.Input()->HeightOfOneBlock();
  int output_width = param.Output()->WidthOfOneBlock();
  int output_height = param.Output()->HeightOfOneBlock();

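  // Bind the kernel arguments in the order expected by conv_add_kernel.cl:
  // work-size scalars, the four images, then the geometry scalars.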
  cl_int status;

  status = clSetKernelArg(kernel, 0, sizeof(int), &c_block);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 1, sizeof(int), &w);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 2, sizeof(int), &nh);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 3, sizeof(cl_mem), &input);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 4, sizeof(cl_mem), &filter);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 5, sizeof(cl_mem), &biase);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 6, sizeof(cl_mem), &output);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 7, sizeof(int), &stride);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 8, sizeof(int), &offset);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 9, sizeof(int), &input_c);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 10, sizeof(int), &dilation);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 11, sizeof(int), &input_width);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 12, sizeof(int), &input_height);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 13, sizeof(int), &output_width);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 14, sizeof(int), &output_height);
  CL_CHECK_ERRORS(status);

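  // Launch over the default 3-D work size, waiting on the input's event and
  // recording completion in the output's event.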
  cl_event out_event = param.Output()->GetClEvent();
  cl_event wait_event = param.Input()->GetClEvent();

  status = clEnqueueNDRangeKernel(
      this->cl_helper_.CLCommandQueue(), kernel, default_work_size.size(),
      NULL, default_work_size.data(), NULL, 1, &wait_event, &out_event);
  CL_CHECK_ERRORS(status);
}

template class ConvAddKernel<GPU_CL, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif