// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>
#include <string>
#include <vector>

#include "lite/api/paddle_place.h"
#include "lite/backends/opencl/cl_half.h"
#include "lite/core/kernel.h"
#include "lite/core/op_registry.h"
#include "lite/core/target_wrapper.h"
#include "lite/core/type_system.h"
#include "lite/kernels/opencl/image_helper.h"
#include "lite/operators/op_params.h"
#include "lite/utils/cp_logging.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace opencl {

32 33 34 35 36
// [NCHW] -> [ImageDefault]
class LayoutComputeBufferChwToImageDefault
    : public KernelLite<TARGET(kOpenCL),
                        PRECISION(kAny),
                        DATALAYOUT(kImageDefault)> {
37 38 39 40
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
41 42 43 44
    auto& param = Param<param_t>();
    if (param.process_type == 1) {
      kernel_func_name_ = "buffer_to_image2d_with_pre255";
    }
45
    VLOG(1) << "kernel_func_name_:" << kernel_func_name_;
46
    auto& context = ctx_->As<OpenCLContext>();
X
xiebaiyuan 已提交
47 48 49 50
    context.cl_context()->AddKernel(kernel_func_name_,
                                    "image/layout_kernel.cl",
                                    build_options_,
                                    time_stamp_);
51 52 53 54
  }

  void Run() override {
    auto& param = Param<param_t>();
55 56 57 58 59 60
    const cl::Buffer* x_data;
    if (param.process_type == 1) {
      x_data = param.x->data<uint8_t, cl::Buffer>();
    } else {
      x_data = param.x->data<float, cl::Buffer>();
    }
61 62
    auto x_dims = param.x->dims();
    auto image_shape = InitImageDimInfoWith(x_dims);
63
    auto* y_data = param.y->mutable_data<half_t, cl::Image2D>(
64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
        image_shape["width"], image_shape["height"]);
    auto y_dims = param.y->dims();

    // out info
    std::vector<size_t> new_dims = {1, 1, 1, 1};
    for (int tidx = 0; tidx < x_dims.size(); ++tidx) {
      new_dims[4 - x_dims.size() + tidx] = x_dims[tidx];
    }
    const int out_C = new_dims[1];
    const int out_H = new_dims[2];
    const int out_W = new_dims[3];
    const int Stride2 = out_C * out_H * out_W;
    const int Stride1 = out_H * out_W;
    const int Stride0 = out_W;

79
#ifdef LITE_WITH_LOG
80 81 82 83
    VLOG(2) << "param.process_type:" << param.process_type;
    VLOG(2) << "x_dims:" << x_dims;
    VLOG(2) << "param.x->memory_size():" << param.x->memory_size();
    VLOG(2) << "new_dims[" << new_dims.size() << "D]:" << new_dims[0] << " "
84
            << new_dims[1] << " " << new_dims[2] << " " << new_dims[3];
85 86 87 88 89 90 91 92 93 94
    VLOG(2) << "y_dims:" << y_dims;
    VLOG(2) << "param.y->memory_size():" << param.y->memory_size();
    VLOG(2) << "y image_shape(w,h):" << image_shape["width"] << " "
            << image_shape["height"];
    VLOG(2) << "out_C:" << out_C;
    VLOG(2) << "out_H:" << out_H;
    VLOG(2) << "out_W:" << out_W;
    VLOG(2) << "Stride2:" << Stride2;
    VLOG(2) << "Stride1:" << Stride1;
    VLOG(2) << "Stride0:" << Stride0;
95
#endif
96 97 98 99

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);
    STL::stringstream kernel_key;
X
xiebaiyuan 已提交
100
    kernel_key << kernel_func_name_ << build_options_ << time_stamp_;
101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120
    auto kernel = context.cl_context()->GetKernel(kernel_key.str());

    int arg_idx = 0;
    cl_int status = kernel.setArg(arg_idx, *x_data);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, *y_data);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_H));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_W));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_C));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride0));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride1));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride2));
    CL_CHECK_FATAL(status);

121
#ifdef LITE_WITH_LOG
122
    VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3]
123
            << " " << (new_dims[0] * new_dims[2]);
124 125
#endif

126 127 128 129
    auto global_work_size =
        cl::NDRange{static_cast<cl::size_type>((new_dims[1] + 3) / 4),
                    static_cast<cl::size_type>(new_dims[3]),
                    static_cast<cl::size_type>(new_dims[0] * new_dims[2])};
X
xiebaiyuan 已提交
130

131 132 133 134 135 136
    status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
        kernel,
        cl::NullRange,
        global_work_size,
        cl::NullRange,
        nullptr,
X
xiebaiyuan 已提交
137
        nullptr);
138 139 140 141
    CL_CHECK_FATAL(status);
  }

  std::string doc() const override {
142
    return "Trans Layout from cl::Buffer(NCHW) to "
143
           "cl::Image2D(ImageDefault/RGBA), Float ---> FP16";
144 145 146
  }

 private:
X
xiebaiyuan 已提交
147
  std::string time_stamp_{GetTimeStamp()};
148
  std::string kernel_func_name_{"buffer_to_image2d"};
149
  std::string build_options_{"-DCL_DTYPE_float"};
150 151
};

152 153 154
// [ImageDefault] -> [NCHW]
class LayoutComputeImageDefaultToBufferChw
    : public KernelLite<TARGET(kOpenCL), PRECISION(kAny), DATALAYOUT(kNCHW)> {
155 156 157 158
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
159 160 161 162
    auto& param = Param<param_t>();
    if (param.process_type == 1) {
      kernel_func_name_ = "image2d_to_buffer_with_post255";
    }
163
    VLOG(1) << "kernel_func_name_:" << kernel_func_name_;
164
    auto& context = ctx_->As<OpenCLContext>();
X
xiebaiyuan 已提交
165 166 167 168
    context.cl_context()->AddKernel(kernel_func_name_,
                                    "image/layout_kernel.cl",
                                    build_options_,
                                    time_stamp_);
169 170 171 172
  }

  void Run() override {
    auto& param = Param<param_t>();
173 174 175 176 177 178
    const cl::Buffer* y_data;
    if (param.process_type == 1) {
      y_data = param.y->mutable_data<uint8_t, cl::Buffer>(TARGET(kOpenCL));
    } else {
      y_data = param.y->mutable_data<float, cl::Buffer>(TARGET(kOpenCL));
    }
179
    auto* x_data = param.x->data<half_t, cl::Image2D>();
180
    auto x_dims = param.x->dims();
181
    auto y_dims = param.y->dims();
182
    auto x_image_shape = InitImageDimInfoWith(x_dims);
183 184

    std::vector<size_t> new_dims = {1, 1, 1, 1};
185 186
    for (int j = 0; j < x_dims.size(); ++j) {
      new_dims[4 - x_dims.size() + j] = x_dims[j];
187 188
    }

189
#ifdef LITE_WITH_LOG
190 191 192 193
    VLOG(2) << "param.process_type:" << param.process_type;
    VLOG(2) << "x_dims:" << x_dims;
    VLOG(2) << "param.x->memory_size():" << param.x->memory_size();
    VLOG(2) << "x_image_shape(w,h):" << x_image_shape["width"] << " "
194
            << x_image_shape["height"];
195
    VLOG(2) << "new_dims[" << new_dims.size() << "D]:" << new_dims[0] << " "
196
            << new_dims[1] << " " << new_dims[2] << " " << new_dims[3];
197 198
    VLOG(2) << "y_dims:" << y_dims;
    VLOG(2) << "param.y->memory_size():" << param.y->memory_size();
199
#endif
200

201 202 203 204 205 206
    size_t C = new_dims[1];
    size_t in_height = new_dims[2];
    size_t in_width = new_dims[3];
    int size_ch = in_height * in_width;
    int size_block = size_ch * 4;
    int size_batch = size_ch * C;
207 208 209 210

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);
    STL::stringstream kernel_key;
X
xiebaiyuan 已提交
211
    kernel_key << kernel_func_name_ << build_options_ << time_stamp_;
212 213 214 215 216
    auto kernel = context.cl_context()->GetKernel(kernel_key.str());

    int arg_idx = 0;
    cl_int status = kernel.setArg(arg_idx, *x_data);
    CL_CHECK_FATAL(status);
217
    status = kernel.setArg(++arg_idx, static_cast<const int>(in_width));
218
    CL_CHECK_FATAL(status);
219
    status = kernel.setArg(++arg_idx, static_cast<const int>(in_height));
220
    CL_CHECK_FATAL(status);
221
    status = kernel.setArg(++arg_idx, *y_data);
222
    CL_CHECK_FATAL(status);
223
    status = kernel.setArg(++arg_idx, static_cast<const int>(size_ch));
224
    CL_CHECK_FATAL(status);
225
    status = kernel.setArg(++arg_idx, static_cast<const int>(size_block));
226
    CL_CHECK_FATAL(status);
227
    status = kernel.setArg(++arg_idx, static_cast<const int>(size_batch));
228
    CL_CHECK_FATAL(status);
229
    status = kernel.setArg(++arg_idx, static_cast<const int>(C));
230
    CL_CHECK_FATAL(status);
231
#ifdef LITE_WITH_LOG
232
    VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3]
233
            << " " << (new_dims[0] * new_dims[2]);
234
#endif
235
    auto global_work_size =
236 237 238
        cl::NDRange{static_cast<cl::size_type>((new_dims[1] + 3) / 4),
                    static_cast<cl::size_type>(new_dims[3]),
                    static_cast<cl::size_type>(new_dims[0] * new_dims[2])};
X
xiebaiyuan 已提交
239

240 241 242 243 244 245
    status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
        kernel,
        cl::NullRange,
        global_work_size,
        cl::NullRange,
        nullptr,
X
xiebaiyuan 已提交
246
        nullptr);
247 248 249 250
    CL_CHECK_FATAL(status);
  }

  std::string doc() const override {
251
    return "Trans Layout from cl::Image2D(ImageDefault/RGBA) to "
252
           "cl::Buffer(NCHW), FP16 ---> Float";
253 254 255
  }

 private:
X
xiebaiyuan 已提交
256
  std::string time_stamp_{GetTimeStamp()};
257 258
  std::string kernel_func_name_{"image2d_to_buffer"};
  std::string build_options_{"-DCL_DTYPE_float"};
259 260
};

261 262 263 264 265
// [NCHW] -> [ImageDW]
class LayoutComputeBufferChwToImage2DNw
    : public KernelLite<TARGET(kOpenCL),
                        PRECISION(kFloat),
                        DATALAYOUT(kImageNW)> {
266 267 268 269 270
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
    auto& context = ctx_->As<OpenCLContext>();
X
xiebaiyuan 已提交
271 272 273 274
    context.cl_context()->AddKernel(kernel_func_name_,
                                    "buffer/layout_kernel.cl",
                                    build_options_,
                                    time_stamp_);
275 276 277 278
  }

  void Run() override {
    auto& param = Param<param_t>();
279
    auto* x_data = param.x->data<float, cl::Buffer>();
280 281
    auto x_dims = param.x->dims();

282 283 284 285 286 287 288 289 290
    CHECK(x_dims.size() == 4) << " Tensor dim is not 4.";
    size_t image_width = x_dims[3] * ((x_dims[0] + 3) / 4);
    size_t image_height = x_dims[1] * x_dims[2];

    auto* y_data =
        param.y->mutable_data<float, cl::Image2D>(image_width, image_height);
    auto y_dims = param.y->dims();

    // out info
291
    std::vector<size_t> new_dims = {1, 1, 1, 1};
292 293
    for (int tidx = 0; tidx < x_dims.size(); ++tidx) {
      new_dims[4 - x_dims.size() + tidx] = x_dims[tidx];
294 295
    }

296 297 298 299
    const int out_N = new_dims[0];
    const int out_C = new_dims[1];
    const int out_H = new_dims[2];
    const int out_W = new_dims[3];
300

301 302 303
    const int Stride2 = out_C * out_H * out_W;
    const int Stride1 = out_H * out_W;
    const int Stride0 = out_W;
304 305 306 307

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);
    STL::stringstream kernel_key;
X
xiebaiyuan 已提交
308
    kernel_key << kernel_func_name_ << build_options_ << time_stamp_;
309 310 311 312 313
    auto kernel = context.cl_context()->GetKernel(kernel_key.str());

    int arg_idx = 0;
    cl_int status = kernel.setArg(arg_idx, *x_data);
    CL_CHECK_FATAL(status);
314
    status = kernel.setArg(++arg_idx, *y_data);
315
    CL_CHECK_FATAL(status);
316
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_H));
317
    CL_CHECK_FATAL(status);
318
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_W));
319
    CL_CHECK_FATAL(status);
320
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_N));
321
    CL_CHECK_FATAL(status);
322
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride0));
323
    CL_CHECK_FATAL(status);
324
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride1));
325
    CL_CHECK_FATAL(status);
326
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride2));
327
    CL_CHECK_FATAL(status);
328

329
    VLOG(2) << "gws:[3D]" << ((out_N + 3) / 4) << " " << out_W << " "
330
            << (out_C * out_H);
331
    auto global_work_size =
332 333 334
        cl::NDRange{static_cast<cl::size_type>((out_N + 3) / 4),  // N blocks
                    static_cast<cl::size_type>(out_W),            // w
                    static_cast<cl::size_type>(out_C * out_H)};   // ch
X
xiebaiyuan 已提交
335

336 337 338 339 340 341
    status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
        kernel,
        cl::NullRange,
        global_work_size,
        cl::NullRange,
        nullptr,
X
xiebaiyuan 已提交
342
        nullptr);
343 344 345 346
    CL_CHECK_FATAL(status);
  }

  std::string doc() const override {
347
    return "Trans Layout from cl::Buffer(NCHW) to cl::Image2D(ImageDW/CLNW)";
348 349 350
  }

 private:
X
xiebaiyuan 已提交
351 352
  std::string time_stamp_{GetTimeStamp()};

353 354
  std::string kernel_func_name_{"buffer_to_image2d_nw"};
  std::string build_options_{"-DCL_DTYPE_float "};
355 356 357 358 359 360 361
};

}  // namespace opencl
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

362
// [NCHW] -> [ImageDefault]
363 364 365
REGISTER_LITE_KERNEL(
    layout,
    kOpenCL,
366
    kAny,
367 368 369
    kImageDefault,
    paddle::lite::kernels::opencl::LayoutComputeBufferChwToImageDefault,
    NCHW_to_ImageDefault)
370 371
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
372
                                      PRECISION(kAny),
373 374 375
                                      DATALAYOUT(kNCHW))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
376
                                       PRECISION(kAny),
377
                                       DATALAYOUT(kImageDefault))})
378 379
    .Finalize();

380
// [ImageDefault] -> [NCHW]
381 382 383
REGISTER_LITE_KERNEL(
    layout,
    kOpenCL,
384
    kAny,
385
    kNCHW,
386 387
    paddle::lite::kernels::opencl::LayoutComputeImageDefaultToBufferChw,
    ImageDefault_to_NCHW)
388 389
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
390
                                      PRECISION(kAny),
391
                                      DATALAYOUT(kImageDefault))})
392 393
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
394
                                       PRECISION(kAny),
395 396
                                       DATALAYOUT(kNCHW))})
    .Finalize();