// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>
#include <string>
#include "lite/api/paddle_place.h"
#include "lite/backends/opencl/cl_half.h"
#include "lite/backends/opencl/cl_utility.h"
#include "lite/core/kernel.h"
#include "lite/core/op_registry.h"
#include "lite/core/target_wrapper.h"
#include "lite/core/type_system.h"
#include "lite/kernels/opencl/image_helper.h"
#include "lite/operators/op_params.h"
#include "lite/utils/cp_logging.h"

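// Verbose logging is switched off for this translation unit and restored by
// the matching #define at the end of the file.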
#undef LITE_WITH_LOG

namespace paddle {
namespace lite {
namespace kernels {
namespace opencl {

// [NCHW] -> [ImageDefault]
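// The ImageDefault layout stores a tensor as a 2-D RGBA image in which each
// texel packs 4 consecutive channels: width = ceil(C / 4) * W and
// height = N * H, as computed by InitImageDimInfoWith below.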
class LayoutComputeBufferChwToImageDefault
    : public KernelLite<TARGET(kOpenCL),
                        PRECISION(kAny),
                        DATALAYOUT(kImageDefault)> {
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
    auto& param = Param<param_t>();
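    // process_type == 1 marks uint8 input; the "_with_pre255" kernel
    // variant folds the 255 rescaling into the buffer -> image copy.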
    if (param.process_type == 1) {
      kernel_func_name_ = "buffer_to_image2d_with_pre255";
    }
    VLOG(1) << "kernel_func_name_:" << kernel_func_name_;
    auto& context = ctx_->As<OpenCLContext>();
    context.cl_context()->AddKernel(kernel_func_name_,
                                    "image/layout_kernel.cl",
                                    build_options_,
                                    time_stamp_);
  }

#ifdef LITE_WITH_PROFILE
  void SetProfileRuntimeKernelInfo(paddle::lite::profile::OpCharacter* ch) {
    ch->kernel_func_name = kernel_func_name_;
    ch->cl_event =
        event_;  // `event_` defined in `kernel.h`, valid after kernel::Run
  }
#endif

  void Run() override {
    auto& param = Param<param_t>();
    const cl::Buffer* x_data;
    if (param.process_type == 1) {
      x_data = param.x->data<uint8_t, cl::Buffer>();
    } else {
      x_data = param.x->data<float, cl::Buffer>();
    }
    auto x_dims = param.x->dims();
    auto image_shape = InitImageDimInfoWith(x_dims);
    auto* y_data = param.y->mutable_data<half_t, cl::Image2D>(
        image_shape["width"], image_shape["height"]);
    auto y_dims = param.y->dims();

    // out info
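    // Pad x_dims up to 4-D NCHW by left-filling with 1s,
    // e.g. a 3-D {3, 224, 224} becomes {1, 3, 224, 224}.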
    std::vector<size_t> new_dims = {1, 1, 1, 1};
    for (int tidx = 0; tidx < x_dims.size(); ++tidx) {
      new_dims[4 - x_dims.size() + tidx] = x_dims[tidx];
    }
    const int out_C = new_dims[1];
    const int out_H = new_dims[2];
    const int out_W = new_dims[3];
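    // Strides of the flattened NCHW buffer: one batch sample (C*H*W),
    // one channel plane (H*W), and one row (W).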
    const int Stride2 = out_C * out_H * out_W;
    const int Stride1 = out_H * out_W;
    const int Stride0 = out_W;

#ifdef LITE_WITH_LOG
    VLOG(2) << "param.process_type:" << param.process_type;
    VLOG(2) << "x_dims:" << x_dims;
    VLOG(2) << "param.x->memory_size():" << param.x->memory_size();
    VLOG(2) << "new_dims[" << new_dims.size() << "D]:" << new_dims[0] << " "
            << new_dims[1] << " " << new_dims[2] << " " << new_dims[3];
    VLOG(2) << "y_dims:" << y_dims;
    VLOG(2) << "param.y->memory_size():" << param.y->memory_size();
    VLOG(2) << "y image_shape(w,h):" << image_shape["width"] << " "
            << image_shape["height"];
    VLOG(2) << "out_C:" << out_C;
    VLOG(2) << "out_H:" << out_H;
    VLOG(2) << "out_W:" << out_W;
    VLOG(2) << "Stride2:" << Stride2;
    VLOG(2) << "Stride1:" << Stride1;
    VLOG(2) << "Stride0:" << Stride0;
#endif

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);
    STL::stringstream kernel_key;
    kernel_key << kernel_func_name_ << build_options_ << time_stamp_;
    auto kernel = context.cl_context()->GetKernel(kernel_key.str());

    int arg_idx = 0;
    cl_int status = kernel.setArg(arg_idx, *x_data);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, *y_data);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_H));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_W));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_C));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride0));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride1));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride2));
    CL_CHECK_FATAL(status);

#ifdef LITE_WITH_LOG
    VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3]
            << " " << (new_dims[0] * new_dims[2]);
#endif

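    // One work-item per output texel: x walks channel blocks of 4,
    // y the spatial width, z the combined batch * height dimension.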
    auto global_work_size =
        cl::NDRange{static_cast<cl::size_type>((new_dims[1] + 3) / 4),
                    static_cast<cl::size_type>(new_dims[3]),
                    static_cast<cl::size_type>(new_dims[0] * new_dims[2])};

    status = EnqueueNDRangeKernel(context,
                                  kernel,
                                  cl::NullRange,
                                  global_work_size,
                                  cl::NullRange,
                                  nullptr,
                                  event_);
    CL_CHECK_FATAL(status);
  }

  std::string doc() const override {
    return "Trans Layout from cl::Buffer(NCHW) to "
           "cl::Image2D(ImageDefault/RGBA), Float ---> FP16";
  }

 private:
  std::string time_stamp_{GetTimeStamp()};
  std::string kernel_func_name_{"buffer_to_image2d"};
  std::string build_options_{"-DCL_DTYPE_float"};
};

// [ImageDefault] -> [NCHW]
class LayoutComputeImageDefaultToBufferChw
    : public KernelLite<TARGET(kOpenCL), PRECISION(kAny), DATALAYOUT(kNCHW)> {
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
    auto& param = Param<param_t>();
    if (param.process_type == 1) {
      kernel_func_name_ = "image2d_to_buffer_with_post255";
    }
    VLOG(1) << "kernel_func_name_:" << kernel_func_name_;
    auto& context = ctx_->As<OpenCLContext>();
    context.cl_context()->AddKernel(kernel_func_name_,
                                    "image/layout_kernel.cl",
                                    build_options_,
                                    time_stamp_);
  }

#ifdef LITE_WITH_PROFILE
  void SetProfileRuntimeKernelInfo(paddle::lite::profile::OpCharacter* ch) {
    ch->kernel_func_name = kernel_func_name_;
    ch->cl_event =
        event_;  // `event_` defined in `kernel.h`, valid after kernel::Run
  }
#endif

  void Run() override {
    auto& param = Param<param_t>();
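    // process_type == 1 requests uint8 output; the "_with_post255" kernel
    // variant rescales by 255 during the image -> buffer copy.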
    const cl::Buffer* y_data;
    if (param.process_type == 1) {
      y_data = param.y->mutable_data<uint8_t, cl::Buffer>(TARGET(kOpenCL));
    } else {
      y_data = param.y->mutable_data<float, cl::Buffer>(TARGET(kOpenCL));
    }
    auto* x_data = param.x->data<half_t, cl::Image2D>();
    auto x_dims = param.x->dims();
    auto y_dims = param.y->dims();
    auto x_image_shape = InitImageDimInfoWith(x_dims);

    std::vector<size_t> new_dims = {1, 1, 1, 1};
    for (int j = 0; j < x_dims.size(); ++j) {
      new_dims[4 - x_dims.size() + j] = x_dims[j];
    }

#ifdef LITE_WITH_LOG
    VLOG(2) << "param.process_type:" << param.process_type;
    VLOG(2) << "x_dims:" << x_dims;
    VLOG(2) << "param.x->memory_size():" << param.x->memory_size();
    VLOG(2) << "x_image_shape(w,h):" << x_image_shape["width"] << " "
213
            << x_image_shape["height"];
214
    VLOG(2) << "new_dims[" << new_dims.size() << "D]:" << new_dims[0] << " "
215
            << new_dims[1] << " " << new_dims[2] << " " << new_dims[3];
    VLOG(2) << "y_dims:" << y_dims;
    VLOG(2) << "param.y->memory_size():" << param.y->memory_size();
#endif

    size_t C = new_dims[1];
    size_t in_height = new_dims[2];
    size_t in_width = new_dims[3];
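    // Element offsets into the NCHW output buffer: size_ch is one channel
    // plane, size_block one RGBA block of 4 channels, size_batch one sample.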
    int size_ch = in_height * in_width;
    int size_block = size_ch * 4;
    int size_batch = size_ch * C;

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);
    STL::stringstream kernel_key;
    kernel_key << kernel_func_name_ << build_options_ << time_stamp_;
    auto kernel = context.cl_context()->GetKernel(kernel_key.str());

    int arg_idx = 0;
    cl_int status = kernel.setArg(arg_idx, *x_data);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(in_width));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(in_height));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, *y_data);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(size_ch));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(size_block));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(size_batch));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(C));
    CL_CHECK_FATAL(status);
#ifdef LITE_WITH_LOG
    VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3]
            << " " << (new_dims[0] * new_dims[2]);
#endif
    auto global_work_size =
        cl::NDRange{static_cast<cl::size_type>((new_dims[1] + 3) / 4),
                    static_cast<cl::size_type>(new_dims[3]),
                    static_cast<cl::size_type>(new_dims[0] * new_dims[2])};

    status = EnqueueNDRangeKernel(context,
                                  kernel,
                                  cl::NullRange,
                                  global_work_size,
                                  cl::NullRange,
                                  nullptr,
                                  event_);
    CL_CHECK_FATAL(status);
  }

  std::string doc() const override {
    return "Trans Layout from cl::Image2D(ImageDefault/RGBA) to "
           "cl::Buffer(NCHW), FP16 ---> Float";
  }

 private:
  std::string time_stamp_{GetTimeStamp()};
  std::string kernel_func_name_{"image2d_to_buffer"};
  std::string build_options_{"-DCL_DTYPE_float"};
};

// [NCHW] -> [ImageDW]
class LayoutComputeBufferChwToImage2DNw
    : public KernelLite<TARGET(kOpenCL),
                        PRECISION(kFloat),
                        DATALAYOUT(kImageNW)> {
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
    auto& context = ctx_->As<OpenCLContext>();
    context.cl_context()->AddKernel(kernel_func_name_,
                                    "buffer/layout_kernel.cl",
                                    build_options_,
                                    time_stamp_);
  }

#ifdef LITE_WITH_PROFILE
  void SetProfileRuntimeKernelInfo(paddle::lite::profile::OpCharacter* ch) {
    ch->kernel_func_name = kernel_func_name_;
    ch->cl_event =
        event_;  // `event_` defined in `kernel.h`, valid after kernel::Run
  }
#endif

  void Run() override {
    auto& param = Param<param_t>();
    auto* x_data = param.x->data<float, cl::Buffer>();
    auto x_dims = param.x->dims();

    CHECK(x_dims.size() == 4) << "Tensor dim must be 4, but got "
                              << x_dims.size();
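    // The ImageNW layout packs 4 consecutive batch samples into each texel:
    // width = ceil(N / 4) * W and height = C * H.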
    size_t image_width = x_dims[3] * ((x_dims[0] + 3) / 4);
    size_t image_height = x_dims[1] * x_dims[2];

    auto* y_data =
        param.y->mutable_data<float, cl::Image2D>(image_width, image_height);
    auto y_dims = param.y->dims();

    // out info
    std::vector<size_t> new_dims = {1, 1, 1, 1};
    for (int tidx = 0; tidx < x_dims.size(); ++tidx) {
      new_dims[4 - x_dims.size() + tidx] = x_dims[tidx];
    }

    const int out_N = new_dims[0];
    const int out_C = new_dims[1];
    const int out_H = new_dims[2];
    const int out_W = new_dims[3];

    const int Stride2 = out_C * out_H * out_W;
    const int Stride1 = out_H * out_W;
    const int Stride0 = out_W;

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);
    STL::stringstream kernel_key;
    kernel_key << kernel_func_name_ << build_options_ << time_stamp_;
    auto kernel = context.cl_context()->GetKernel(kernel_key.str());

    int arg_idx = 0;
    cl_int status = kernel.setArg(arg_idx, *x_data);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, *y_data);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_H));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_W));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_N));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride0));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride1));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride2));
    CL_CHECK_FATAL(status);

    VLOG(2) << "gws:[3D]" << ((out_N + 3) / 4) << " " << out_W << " "
            << (out_C * out_H);
    auto global_work_size =
        cl::NDRange{static_cast<cl::size_type>((out_N + 3) / 4),  // N blocks
                    static_cast<cl::size_type>(out_W),            // w
                    static_cast<cl::size_type>(out_C * out_H)};   // ch

    status = EnqueueNDRangeKernel(context,
                                  kernel,
                                  cl::NullRange,
                                  global_work_size,
                                  cl::NullRange,
                                  nullptr,
                                  event_);
    CL_CHECK_FATAL(status);
  }

  std::string doc() const override {
    return "Trans Layout from cl::Buffer(NCHW) to cl::Image2D(ImageDW/CLNW)";
  }

 private:
  std::string time_stamp_{GetTimeStamp()};

  std::string kernel_func_name_{"buffer_to_image2d_nw"};
  std::string build_options_{"-DCL_DTYPE_float "};
};

}  // namespace opencl
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

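// Kernel registration: both conversions are registered under the common
// "layout" op with kAny precision and distinct source/target data layouts.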
// [NCHW] -> [ImageDefault]
REGISTER_LITE_KERNEL(
    layout,
    kOpenCL,
    kAny,
    kImageDefault,
    paddle::lite::kernels::opencl::LayoutComputeBufferChwToImageDefault,
    NCHW_to_ImageDefault)
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
                                      PRECISION(kAny),
                                      DATALAYOUT(kNCHW))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
                                       PRECISION(kAny),
                                       DATALAYOUT(kImageDefault))})
    .Finalize();

// [ImageDefault] -> [NCHW]
REGISTER_LITE_KERNEL(
    layout,
    kOpenCL,
    kAny,
    kNCHW,
    paddle::lite::kernels::opencl::LayoutComputeImageDefaultToBufferChw,
    ImageDefault_to_NCHW)
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
                                      PRECISION(kAny),
                                      DATALAYOUT(kImageDefault))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
                                       PRECISION(kAny),
                                       DATALAYOUT(kNCHW))})
    .Finalize();
#define LITE_WITH_LOG