/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef CONV_OP

#include "operators/kernel/conv_kernel.h"
#include "operators/kernel/cl/cl-kernel-func/conv_func.h"

namespace paddle_mobile {
namespace operators {

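// Init selects the OpenCL kernel and execution mode from the filter shape:
// 1x1 pointwise, 3x3 depthwise, and 3x3 / 7x7 sliding-window convolutions.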
template <>
bool ConvKernel<GPU_CL, float>::Init(ConvParam<GPU_CL> *param) {
  PADDLE_MOBILE_ENFORCE(
      param->Filter()->dims()[2] == param->Filter()->dims()[3] &&
          param->Paddings()[0] == param->Paddings()[1],
      "filter height and width must be equal, and paddings must be equal");

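  // Offset of the filter center relative to the padding (filter radius minus
  // the padding size), used later when the CL kernels index into the input.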
  int offset = static_cast<int>(param->Filter()->dims()[2]) / 2 -
               static_cast<int>(param->Paddings()[1]);
  param->SetOffset(offset);

  DLOG << " init helper: " << &cl_helper_;
  DLOG << " conv kernel add kernel ~ ";
  DLOG << " width of one block: " << param->Filter()->dims()[3];
  DLOG << " height of one block: " << param->Filter()->dims()[2];
  DLOG << " filter dims: " << param->Filter()->dims();

  const std::string conv_kernel_file = "conv_kernel.cl";
  const std::string wino_kernel_file = "winograd_transform.cl";

  if (param->Filter()->dims()[2] == 1 && param->Filter()->dims()[3] == 1) {
    param->ExecMode() = ConvParam<GPU_CL>::EXEC_SLIDINGWINDOW1x1_FLOAT;
    param->Filter()->InitNImage(cl_helper_.CLContext(),
                                cl_helper_.CLCommandQueue());

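    // Input channels divisible by 4 can use the simpler 1x1 kernel;
    // otherwise fall back to the wrapped variant.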
    if (param->Input()->dims()[1] % 4 == 0) {
      this->cl_helper_.AddKernel("conv_1x1_simple", conv_kernel_file);
    } else {
      this->cl_helper_.AddKernel("conv_1x1_wrapped", conv_kernel_file);
    }
    DLOG << "conv 1x1";

  } else if (param->Filter()->dims()[1] == 1 &&
             param->Input()->dims()[1] == param->Output()->dims()[1] &&
             param->Filter()->dims()[2] == 3) {
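    // Depthwise 3x3: single-channel filters, channel count preserved.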
    param->Filter()->InitDWImage(cl_helper_.CLContext(),
                                 cl_helper_.CLCommandQueue());
    if (param->Strides()[0] == 1 && param->Dilations()[0] == 1) {
      param->ExecMode() = ConvParam<GPU_CL>::EXEC_DEPTHWISE3x3S1_FLOAT;
      this->cl_helper_.AddKernel("depth_conv_3x3s1", conv_kernel_file);
    } else {
      param->ExecMode() = ConvParam<GPU_CL>::EXEC_DEPTHWISE3x3_FLOAT;
      this->cl_helper_.AddKernel("depth_conv_3x3", conv_kernel_file);
    }
    DLOG << "depth_conv 3x3";

  } else if (param->Filter()->dims()[2] == 3 &&
             param->Filter()->dims()[3] == 3) {
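    // The Winograd path below is currently disabled; the plain
    // sliding-window 3x3 kernel is used instead.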
    //    if (param->Strides()[0] == param->Strides()[1] &&
    //        param->Strides()[0] == 1 && param->Input()->dims()[2] >= 32) {
    //      param->ExecMode() = ConvParam<GPU_CL>::EXEC_WINOGRAD3X3_FLOAT;
    //      this->cl_helper_.AddKernel("winograd_filter_transform_2x2",
    //                                 wino_kernel_file);
    //      this->cl_helper_.AddKernel("winograd_input_transform_2x2",
    //                                 wino_kernel_file);
    //      this->cl_helper_.AddKernel("matmul", "matmul.cl");
    //      this->cl_helper_.AddKernel("winograd_output_transform_2x2",
    //                                 wino_kernel_file);
    //
    //      winograd_transform_weight<4, 3>(&this->cl_helper_, param->Filter());
    //
    //    } else {
    param->ExecMode() = ConvParam<GPU_CL>::EXEC_SLIDINGWINDOW3x3_FLOAT;
    param->Filter()->InitCLImage(cl_helper_.CLContext(),
                                 cl_helper_.CLCommandQueue());

    this->cl_helper_.AddKernel("conv_3x3", conv_kernel_file);
    //    }
    DLOG << "conv 3x3";
  } else if (param->Filter()->dims()[2] == 7 &&
             param->Filter()->dims()[3] == 7) {
    param->ExecMode() = ConvParam<GPU_CL>::EXEC_SLIDINGWINDOW7x7_FLOAT;
    param->Filter()->InitCLImage(cl_helper_.CLContext(),
                                 cl_helper_.CLCommandQueue());

    this->cl_helper_.AddKernel("conv_7x7", conv_kernel_file);
    //    }
    DLOG << "conv 7x7";
  } else {
    PADDLE_MOBILE_THROW_EXCEPTION("unsupported filter size");
  }

  return true;
}

template <>
void ConvKernel<GPU_CL, float>::Compute(const ConvParam<GPU_CL> &param) {
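  // Dispatch on the execution mode selected in Init().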
  switch (param.ExecMode()) {
    case ConvParam<GPU_CL>::EXEC_WINOGRAD3X3_FLOAT:
      WinogradConv3x3<4, 3>(&this->cl_helper_, param);
      break;
    case ConvParam<GPU_CL>::EXEC_SLIDINGWINDOW1x1_FLOAT:
    case ConvParam<GPU_CL>::EXEC_SLIDINGWINDOW3x3_FLOAT:
    case ConvParam<GPU_CL>::EXEC_DEPTHWISE3x3_FLOAT:
    case ConvParam<GPU_CL>::EXEC_SLIDINGWINDOW7x7_FLOAT:
      ConvAddBnRelu(&this->cl_helper_, param);
      break;
    case ConvParam<GPU_CL>::EXEC_DEPTHWISE3x3S1_FLOAT:
      DWConvAddBnRelu(&this->cl_helper_, param);
      break;
    default:
      PADDLE_MOBILE_THROW_EXCEPTION("Invalid convolution execute mode %d",
                                    param.ExecMode());
  }
}

template class ConvKernel<GPU_CL, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif