// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>
#include <string>
#include <vector>

#include "lite/api/paddle_place.h"
#include "lite/core/kernel.h"
#include "lite/core/op_registry.h"
#include "lite/core/target_wrapper.h"
#include "lite/core/type_system.h"
#include "lite/kernels/opencl/image_helper.h"
#include "lite/operators/op_params.h"
#include "lite/utils/cp_logging.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace opencl {

31 32 33 34 35
// [NCHW] -> [ImageDefault]
class LayoutComputeBufferChwToImageDefault
    : public KernelLite<TARGET(kOpenCL),
                        PRECISION(kAny),
                        DATALAYOUT(kImageDefault)> {
36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
    auto& context = ctx_->As<OpenCLContext>();
    context.cl_context()->AddKernel(
        kernel_func_name_, "buffer/layout_kernel.cl", build_options_);
  }

  void Run() override {
    auto& param = Param<param_t>();
    auto* x_data = param.x->data<float, cl::Buffer>();
    auto x_dims = param.x->dims();
    auto image_shape = InitImageDimInfoWith(x_dims);
    auto* y_data = param.y->mutable_data<float, cl::Image2D>(
        image_shape["width"], image_shape["height"]);
    auto y_dims = param.y->dims();

    // out info
    std::vector<size_t> new_dims = {1, 1, 1, 1};
    for (int tidx = 0; tidx < x_dims.size(); ++tidx) {
      new_dims[4 - x_dims.size() + tidx] = x_dims[tidx];
    }
    const int out_C = new_dims[1];
    const int out_H = new_dims[2];
    const int out_W = new_dims[3];
    const int Stride2 = out_C * out_H * out_W;
    const int Stride1 = out_H * out_W;
    const int Stride0 = out_W;

    VLOG(4) << "x_dims[" << x_dims.size() << "D]:" << x_dims[0] << " "
            << x_dims[1] << " " << x_dims[2] << " " << x_dims[3];
    VLOG(4) << "y_dims[" << y_dims.size() << "D]:" << y_dims[0] << " "
            << y_dims[1] << " " << y_dims[2] << " " << y_dims[3];
    VLOG(4) << "new_dims[" << new_dims.size() << "D]:" << new_dims[0] << " "
            << new_dims[1] << " " << new_dims[2] << " " << new_dims[3];
    VLOG(4) << "out_C:" << out_C;
    VLOG(4) << "out_H:" << out_H;
    VLOG(4) << "out_W:" << out_W;
    VLOG(4) << "Stride2:" << Stride2;
    VLOG(4) << "Stride1:" << Stride1;
    VLOG(4) << "Stride0:" << Stride0;

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);
    STL::stringstream kernel_key;
    kernel_key << kernel_func_name_ << build_options_;
    auto kernel = context.cl_context()->GetKernel(kernel_key.str());

    int arg_idx = 0;
    cl_int status = kernel.setArg(arg_idx, *x_data);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, *y_data);
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_H));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_W));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_C));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride0));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride1));
    CL_CHECK_FATAL(status);
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride2));
    CL_CHECK_FATAL(status);

    VLOG(4) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3]
            << " " << (new_dims[0] * new_dims[2]);
    auto global_work_size =
        cl::NDRange{static_cast<cl::size_type>((new_dims[1] + 3) / 4),
                    static_cast<cl::size_type>(new_dims[3]),
                    static_cast<cl::size_type>(new_dims[0] * new_dims[2])};
    status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
        kernel,
        cl::NullRange,
        global_work_size,
        cl::NullRange,
        nullptr,
        event_.get());
    CL_CHECK_FATAL(status);
    // TODO(ysh329): io_copy(device->host) jammed if emplace to `cl_wait_list`
    // context.cl_wait_list()->emplace(y_data, event_);
    context.cl_context()->GetCommandQueue().finish();
  }

  std::string doc() const override {
123 124
    return "Trans Layout from cl::Buffer(NCHW) to "
           "cl::Image2D(ImageDefault/RGBA)";
125 126 127 128
  }

 private:
  std::string kernel_func_name_{"buffer_to_image2d"};
129
  std::string build_options_{"-DCL_DTYPE_float "};
130 131 132
  std::shared_ptr<cl::Event> event_{new cl::Event};
};

133 134 135
// [ImageDefault] -> [NCHW]
class LayoutComputeImageDefaultToBufferChw
    : public KernelLite<TARGET(kOpenCL), PRECISION(kAny), DATALAYOUT(kNCHW)> {
136 137 138 139 140 141 142 143 144 145 146
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
    auto& context = ctx_->As<OpenCLContext>();
    context.cl_context()->AddKernel(
        kernel_func_name_, "buffer/layout_kernel.cl", build_options_);
  }

  void Run() override {
    auto& param = Param<param_t>();
147
    auto* y_data = param.y->mutable_data<float, cl::Buffer>(TARGET(kOpenCL));
148
    auto y_dims = param.y->dims();
149 150
    auto* x_data = param.x->data<float, cl::Image2D>();
    auto x_dims = param.x->dims();
151 152

    std::vector<size_t> new_dims = {1, 1, 1, 1};
153 154
    for (int j = 0; j < x_dims.size(); ++j) {
      new_dims[4 - x_dims.size() + j] = x_dims[j];
155 156
    }

157 158 159 160 161 162
    VLOG(4) << "x_dims[" << x_dims.size() << "D]:" << x_dims[0] << " "
            << x_dims[1] << " " << x_dims[2] << " " << x_dims[3];
    VLOG(4) << "y_dims[" << y_dims.size() << "D]:" << y_dims[0] << " "
            << y_dims[1] << " " << y_dims[2] << " " << y_dims[3];
    VLOG(4) << "new_dims[" << new_dims.size() << "D]:" << new_dims[0] << " "
            << new_dims[1] << " " << new_dims[2] << " " << new_dims[3];
163

164 165 166 167 168 169
    size_t C = new_dims[1];
    size_t in_height = new_dims[2];
    size_t in_width = new_dims[3];
    int size_ch = in_height * in_width;
    int size_block = size_ch * 4;
    int size_batch = size_ch * C;
170 171 172 173 174 175 176 177 178 179

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);
    STL::stringstream kernel_key;
    kernel_key << kernel_func_name_ << build_options_;
    auto kernel = context.cl_context()->GetKernel(kernel_key.str());

    int arg_idx = 0;
    cl_int status = kernel.setArg(arg_idx, *x_data);
    CL_CHECK_FATAL(status);
180
    status = kernel.setArg(++arg_idx, static_cast<const int>(in_width));
181
    CL_CHECK_FATAL(status);
182
    status = kernel.setArg(++arg_idx, static_cast<const int>(in_height));
183
    CL_CHECK_FATAL(status);
184
    status = kernel.setArg(++arg_idx, *y_data);
185
    CL_CHECK_FATAL(status);
186
    status = kernel.setArg(++arg_idx, static_cast<const int>(size_ch));
187
    CL_CHECK_FATAL(status);
188
    status = kernel.setArg(++arg_idx, static_cast<const int>(size_ch));
189
    CL_CHECK_FATAL(status);
190
    status = kernel.setArg(++arg_idx, static_cast<const int>(size_batch));
191
    CL_CHECK_FATAL(status);
192
    status = kernel.setArg(++arg_idx, static_cast<const int>(C));
193
    CL_CHECK_FATAL(status);
194 195
    VLOG(4) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3]
            << " " << (new_dims[0] * new_dims[2]);
196
    auto global_work_size =
197 198 199
        cl::NDRange{static_cast<cl::size_type>((new_dims[1] + 3) / 4),
                    static_cast<cl::size_type>(new_dims[3]),
                    static_cast<cl::size_type>(new_dims[0] * new_dims[2])};
200 201 202 203 204 205 206 207 208 209 210 211 212 213
    status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
        kernel,
        cl::NullRange,
        global_work_size,
        cl::NullRange,
        nullptr,
        event_.get());
    CL_CHECK_FATAL(status);
    // TODO(ysh329): io_copy(device->host) jammed if emplace to `cl_wait_list`
    // context.cl_wait_list()->emplace(y_data, event_);
    context.cl_context()->GetCommandQueue().finish();
  }

  std::string doc() const override {
214 215
    return "Trans Layout from cl::Image2D(ImageDefault/RGBA) to "
           "cl::Buffer(NCHW)";
216 217 218
  }

 private:
219 220
  std::string kernel_func_name_{"image2d_to_buffer"};
  std::string build_options_{"-DCL_DTYPE_float"};
221 222 223
  std::shared_ptr<cl::Event> event_{new cl::Event};
};

224 225 226 227 228
// [NCHW] -> [ImageDW]
class LayoutComputeBufferChwToImage2DNw
    : public KernelLite<TARGET(kOpenCL),
                        PRECISION(kFloat),
                        DATALAYOUT(kImageNW)> {
229 230 231 232 233 234 235 236 237 238 239
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
    auto& context = ctx_->As<OpenCLContext>();
    context.cl_context()->AddKernel(
        kernel_func_name_, "buffer/layout_kernel.cl", build_options_);
  }

  void Run() override {
    auto& param = Param<param_t>();
240
    auto* x_data = param.x->data<float, cl::Buffer>();
241 242
    auto x_dims = param.x->dims();

243 244 245 246 247 248 249 250 251
    CHECK(x_dims.size() == 4) << " Tensor dim is not 4.";
    size_t image_width = x_dims[3] * ((x_dims[0] + 3) / 4);
    size_t image_height = x_dims[1] * x_dims[2];

    auto* y_data =
        param.y->mutable_data<float, cl::Image2D>(image_width, image_height);
    auto y_dims = param.y->dims();

    // out info
252
    std::vector<size_t> new_dims = {1, 1, 1, 1};
253 254
    for (int tidx = 0; tidx < x_dims.size(); ++tidx) {
      new_dims[4 - x_dims.size() + tidx] = x_dims[tidx];
255 256
    }

257 258 259 260
    const int out_N = new_dims[0];
    const int out_C = new_dims[1];
    const int out_H = new_dims[2];
    const int out_W = new_dims[3];
261

262 263 264
    const int Stride2 = out_C * out_H * out_W;
    const int Stride1 = out_H * out_W;
    const int Stride0 = out_W;
265 266 267 268 269 270 271 272 273 274

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);
    STL::stringstream kernel_key;
    kernel_key << kernel_func_name_ << build_options_;
    auto kernel = context.cl_context()->GetKernel(kernel_key.str());

    int arg_idx = 0;
    cl_int status = kernel.setArg(arg_idx, *x_data);
    CL_CHECK_FATAL(status);
275
    status = kernel.setArg(++arg_idx, *y_data);
276
    CL_CHECK_FATAL(status);
277
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_H));
278
    CL_CHECK_FATAL(status);
279
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_W));
280
    CL_CHECK_FATAL(status);
281
    status = kernel.setArg(++arg_idx, static_cast<const int>(out_N));
282
    CL_CHECK_FATAL(status);
283
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride0));
284
    CL_CHECK_FATAL(status);
285
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride1));
286
    CL_CHECK_FATAL(status);
287
    status = kernel.setArg(++arg_idx, static_cast<const int>(Stride2));
288
    CL_CHECK_FATAL(status);
289 290 291

    VLOG(4) << "gws:[3D]" << ((out_N + 3) / 4) << " " << out_W << " "
            << (out_C * out_H);
292
    auto global_work_size =
293 294 295
        cl::NDRange{static_cast<cl::size_type>((out_N + 3) / 4),  // N blocks
                    static_cast<cl::size_type>(out_W),            // w
                    static_cast<cl::size_type>(out_C * out_H)};   // ch
296 297 298 299 300 301 302 303 304 305 306
    status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
        kernel,
        cl::NullRange,
        global_work_size,
        cl::NullRange,
        nullptr,
        event_.get());
    CL_CHECK_FATAL(status);
    // TODO(ysh329): io_copy(device->host) jammed if emplace to `cl_wait_list`
    // context.cl_wait_list()->emplace(y_data, event_);
    context.cl_context()->GetCommandQueue().finish();
307
    //    auto image_shape = InitImageDimInfoWith(x_dims);
308 309 310
  }

  std::string doc() const override {
311
    return "Trans Layout from cl::Buffer(NCHW) to cl::Image2D(ImageDW/CLNW)";
312 313 314
  }

 private:
315 316
  std::string kernel_func_name_{"buffer_to_image2d_nw"};
  std::string build_options_{"-DCL_DTYPE_float "};
317 318 319 320 321 322 323 324
  std::shared_ptr<cl::Event> event_{new cl::Event};
};

}  // namespace opencl
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

325
// [NCHW] -> [ImageDefault]
326 327 328
REGISTER_LITE_KERNEL(
    layout,
    kOpenCL,
329
    kAny,
330 331 332
    kImageDefault,
    paddle::lite::kernels::opencl::LayoutComputeBufferChwToImageDefault,
    NCHW_to_ImageDefault)
333 334
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
335
                                      PRECISION(kAny),
336 337 338
                                      DATALAYOUT(kNCHW))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
339
                                       PRECISION(kAny),
340
                                       DATALAYOUT(kImageDefault))})
341 342 343 344 345
    .Finalize();

// [NCHW] -> [ImageDefault], one-shot variant (runs once, e.g. for weights).
REGISTER_LITE_KERNEL(
    layout_once,
    kOpenCL,
    kAny,
    kImageDefault,
    paddle::lite::kernels::opencl::LayoutComputeBufferChwToImageDefault,
    NCHW_to_ImageDefault)
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
                                      PRECISION(kAny),
                                      DATALAYOUT(kNCHW))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
                                       PRECISION(kAny),
                                       DATALAYOUT(kImageDefault))})
    .Finalize();

360
// [ImageDefault] -> [NCHW]
361 362 363
REGISTER_LITE_KERNEL(
    layout,
    kOpenCL,
364
    kAny,
365
    kNCHW,
366 367
    paddle::lite::kernels::opencl::LayoutComputeImageDefaultToBufferChw,
    ImageDefault_to_NCHW)
368 369
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
370
                                      PRECISION(kAny),
371
                                      DATALAYOUT(kImageDefault))})
372 373
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
374
                                       PRECISION(kAny),
375 376 377 378 379 380
                                       DATALAYOUT(kNCHW))})
    .Finalize();

// [ImageDefault] -> [NCHW], one-shot variant (runs once, e.g. for weights).
REGISTER_LITE_KERNEL(
    layout_once,
    kOpenCL,
    kAny,
    kNCHW,
    paddle::lite::kernels::opencl::LayoutComputeImageDefaultToBufferChw,
    ImageDefault_to_NCHW)
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
                                      PRECISION(kAny),
                                      DATALAYOUT(kImageDefault))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
                                       PRECISION(kAny),
                                       DATALAYOUT(kNCHW))})
    .Finalize();
// [NCHW] -> [ImageNW]
396 397 398 399 400 401
REGISTER_LITE_KERNEL(
    layout_once,
    kOpenCL,
    kFloat,
    kImageNW,
    paddle::lite::kernels::opencl::LayoutComputeBufferChwToImage2DNw,
402
    NCHW_to_ImageNW)
403 404 405 406 407 408 409 410 411
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
                                      PRECISION(kFloat),
                                      DATALAYOUT(kNCHW))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
                                       PRECISION(kFloat),
                                       DATALAYOUT(kImageNW))})
    .Finalize();