// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>
#include <string>
#include <vector>

#include "lite/backends/opencl/cl_half.h"
#include "lite/backends/opencl/cl_include.h"
#include "lite/core/kernel.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/opencl/image_helper.h"
#include "lite/operators/op_params.h"
#include "lite/utils/replace_stl/stream.h"
#include "lite/utils/string.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace opencl {

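// 2D pooling (avg/max) kernel that reads its input from and writes its
// output to cl::Image2D textures holding FP16 data.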
class PoolComputeImage2D : public KernelLite<TARGET(kOpenCL),
                                             PRECISION(kFP16),
                                             DATALAYOUT(kImageDefault)> {
 public:
  using param_t = operators::PoolParam;

  std::string doc() const override { return "Pool using cl::Image2D, kFP16"; }

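  // Picks the kernel variant ("pool_avg"/"pool_max", plus a "_global"
  // suffix for global pooling) and compiles it into the CL context.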
  void PrepareForRun() override {
    const auto& param = *param_.get_mutable<param_t>();

    kernel_func_name_ += param.pooling_type;
    const bool global_pooling = param.global_pooling;
    if (global_pooling) {
      kernel_func_name_ += "_global";
    }
    VLOG(1) << "kernel_func_name_:" << kernel_func_name_;
    auto& context = ctx_->As<OpenCLContext>();
    context.cl_context()->AddKernel(
        kernel_func_name_, "image/pool_kernel.cl", build_options_, time_stamp_);
  }

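  // Derives the launch geometry and kernel arguments from the op params
  // and enqueues the pooling kernel.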
  void Run() override {
    const auto& param = *param_.get_mutable<param_t>();
    const auto& in_dims = param.x->dims();
    const auto& out_dims = param.output->dims();
    const std::string pooling_type = param.pooling_type;
    const bool global_pooling = param.global_pooling;
    std::vector<int> paddings = *param.paddings;
    std::vector<int> strides = param.strides;
    std::vector<int> ksize = param.ksize;

#ifndef LITE_SHUTDOWN_LOG
    VLOG(4) << "global_pooling: " << global_pooling;
    VLOG(4) << "pooling_type: " << pooling_type;
    VLOG(4) << "paddings : " << paddings[0] << "  " << paddings[1] << "  "
            << paddings[2] << "  " << paddings[3] << "  ";
#endif

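    // Global pooling reduces over the whole spatial extent: zero the
    // paddings and widen the kernel window to the input height/width.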
    if (global_pooling) {
      for (size_t i = 0; i < ksize.size(); ++i) {
        paddings[2 * i] = 0;
        paddings[2 * i + 1] = 0;
        ksize[i] = static_cast<int>(in_dims[i + 2]);
      }
    }

#ifndef LITE_SHUTDOWN_LOG
    VLOG(4) << "in_dims : [" << in_dims.size() << "]" << in_dims[0] << "  "
            << in_dims[1] << "  " << in_dims[2] << "  " << in_dims[3];
    VLOG(4) << "out_dims : [" << out_dims.size() << "]" << out_dims[0] << "  "
            << out_dims[1] << "  " << out_dims[2] << "  " << out_dims[3];
    VLOG(4) << "paddings fixed : " << paddings[0] << "  " << paddings[1] << "  "
            << paddings[2] << "  " << paddings[3] << "  ";
    VLOG(4) << "strides : [" << strides.size() << "]" << strides[0] << "  "
            << strides[1];
    VLOG(4) << "ksize : [" << ksize.size() << "]" << ksize[0] << "  "
            << ksize[1] << "  " << ksize[2] << "  " << ksize[3];
    VLOG(4) << "paddings : [" << paddings.size() << "]" << paddings[0] << "  "
            << paddings[1] << "  " << paddings[2] << "  " << paddings[3];
#endif

    bool pads_equal =
        (paddings[0] == paddings[1]) && (paddings[2] == paddings[3]);
    if (!pads_equal) {
      LOG(FATAL)
          << "padding requires pad_left == pad_right, pad_top == pad_bottom";
    }
    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);

    auto* x_img = param.x->data<half_t, cl::Image2D>();
    //    VLOG(4) << "x_image" << x_img;

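    // Map the NCHW output dims to the 2D extent of the output image.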
    auto out_image_shape = InitImageDimInfoWith(out_dims);
#ifndef LITE_SHUTDOWN_LOG
    VLOG(4) << "out_image_shape = " << out_image_shape["width"] << " "
            << out_image_shape["height"];
#endif
    auto* out_img = param.output->mutable_data<half_t, cl::Image2D>(
        out_image_shape["width"], out_image_shape["height"]);
    //    VLOG(4) << "out_image" << out_img;

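    // Look up the kernel compiled in PrepareForRun; the key mirrors the
    // one under which AddKernel registered it.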
    STL::stringstream kernel_key;
    kernel_key << kernel_func_name_ << build_options_ << time_stamp_;
    auto kernel = context.cl_context()->GetKernel(kernel_key.str());

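    // One work-item per output texel: x covers channel blocks of 4
    // (RGBA packing), y the output width, z batch * output height.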
    int c_block = (out_dims[1] + 3) / 4;
    int w = out_dims[3];
    int nh = out_dims[0] * out_dims[2];
    auto global_work_size = cl::NDRange(c_block, w, nh);
#ifndef LITE_SHUTDOWN_LOG
    VLOG(4) << "global_work_size : [" << 3 << "]" << c_block << "  " << w
            << "  " << nh << "  ";
#endif
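    // Bind the kernel arguments; the order must match the parameter
    // list of the kernel in image/pool_kernel.cl.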
    cl_int status;
    int arg_idx = 0;
    status = kernel.setArg(arg_idx, *x_img);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, *out_img);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<int>(in_dims[2]));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<int>(in_dims[3]));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<int>(out_dims[2]));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<int>(out_dims[3]));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<int>(ksize[0]));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<int>(ksize[1]));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<int>(strides[0]));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<int>(strides[1]));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<int>(paddings[2]));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<int>(paddings[0]));
    CL_CHECK_FATAL(status);

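    // Enqueue the kernel and record the completion event so consumers
    // of out_img can synchronize on it through the wait list.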
    status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
        kernel,
        cl::NullRange,
        global_work_size,
        cl::NullRange,
        nullptr,
        event_.get());
    CL_CHECK_FATAL(status);
    context.cl_wait_list()->emplace(out_img, event_);
  }

 private:
  std::string kernel_func_name_{"pool_"};
  std::string build_options_{"-DCL_DTYPE_half"};
  std::string time_stamp_{GetTimeStamp()};
  std::shared_ptr<cl::Event> event_{new cl::Event};
};

}  // namespace opencl
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

REGISTER_LITE_KERNEL(pool2d,
                     kOpenCL,
                     kFP16,
                     kImageDefault,
                     paddle::lite::kernels::opencl::PoolComputeImage2D,
                     image2d)
    .BindInput("X",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
                                      PRECISION(kFP16),
                                      DATALAYOUT(kImageDefault))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
                                       PRECISION(kFP16),
                                       DATALAYOUT(kImageDefault))})
    .Finalize();