// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>
#include <string>
#include "lite/api/paddle_place.h"
#include "lite/backends/opencl/cl_half.h"
#include "lite/core/kernel.h"
#include "lite/core/op_registry.h"
#include "lite/core/target_wrapper.h"
#include "lite/core/type_system.h"
#include "lite/kernels/opencl/image_helper.h"
#include "lite/operators/op_params.h"
#include "lite/utils/cp_logging.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace opencl {
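
// These kernels move tensor data between a flat NCHW cl::Buffer and
// cl::Image2D layouts on the OpenCL backend. Editorial note: the comments
// below that describe image index math are inferred from the host-side
// argument setup and work sizes, not from the .cl kernel sources.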

// [NCHW] -> [ImageDefault]
class LayoutComputeBufferChwToImageDefault
    : public KernelLite<TARGET(kOpenCL),
                        PRECISION(kAny),
                        DATALAYOUT(kImageDefault)> {
 public:
  using param_t = operators::LayoutParam;

  ~LayoutComputeBufferChwToImageDefault() {
    LOG(INFO) << "Release LayoutComputeBufferChwToImageDefault";
    kernel_.reset();
    event_.reset();
  }
  void PrepareForRun() override {
    auto& param = Param<param_t>();
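    // process_type == 1 flags uint8 image-style input; the "_with_pre255"
    // kernel variant presumably folds a 1/255 normalization into the
    // buffer-to-image copy (layout_kernel.cl is the authoritative source).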
    if (param.process_type == 1) {
      kernel_func_name_ = "buffer_to_image2d_with_pre255";
    }
    VLOG(1) << "kernel_func_name_:" << kernel_func_name_;
    auto& context = ctx_->As<OpenCLContext>();
    kernel_ = context.cl_context()->CreateKernel(
        kernel_func_name_, "image/layout_kernel.cl", build_options_);
  }

  void Run() override {
    auto& param = Param<param_t>();
    const cl::Buffer* x_data;
    if (param.process_type == 1) {
      x_data = param.x->data<uint8_t, cl::Buffer>();
    } else {
      x_data = param.x->data<float, cl::Buffer>();
    }
    auto x_dims = param.x->dims();
    auto image_shape = InitImageDimInfoWith(x_dims);
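    // For kImageDefault, InitImageDimInfoWith (image_helper.h) is expected to
    // yield width = W * ceil(C / 4) and height = N * H, i.e. each RGBA texel
    // packs 4 consecutive channels.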
    auto* y_data = param.y->mutable_data<half_t, cl::Image2D>(
        image_shape["width"], image_shape["height"]);
    auto y_dims = param.y->dims();

    // out info
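    // Left-pad x_dims with 1s up to 4-D NCHW,
    // e.g. {3, 224, 224} -> {1, 3, 224, 224}.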
    std::vector<size_t> new_dims = {1, 1, 1, 1};
    for (int tidx = 0; tidx < x_dims.size(); ++tidx) {
      new_dims[4 - x_dims.size() + tidx] = x_dims[tidx];
    }
    const int out_C = new_dims[1];
    const int out_H = new_dims[2];
    const int out_W = new_dims[3];
    const int Stride2 = out_C * out_H * out_W;
    const int Stride1 = out_H * out_W;
    const int Stride0 = out_W;
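    // Element strides of the flat NCHW input: Stride2 steps one batch item,
    // Stride1 one channel plane, Stride0 one row. Sketch of the mapping the
    // CL kernel is assumed to perform for a texel at image coordinate (x, y):
    //   c0 = (x / out_W) * 4; w = x % out_W; n = y / out_H; h = y % out_H;
    //   texel[i] = src[n * Stride2 + (c0 + i) * Stride1 + h * Stride0 + w]
    // for i in 0..3, zero-padded once c0 + i reaches out_C.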

#ifndef LITE_SHUTDOWN_LOG
    VLOG(2) << "param.process_type:" << param.process_type;
    VLOG(2) << "x_dims:" << x_dims;
    VLOG(2) << "param.x->memory_size():" << param.x->memory_size();
    VLOG(2) << "new_dims[" << new_dims.size() << "D]:" << new_dims[0] << " "
            << new_dims[1] << " " << new_dims[2] << " " << new_dims[3];
    VLOG(2) << "y_dims:" << y_dims;
    VLOG(2) << "param.y->memory_size():" << param.y->memory_size();
    VLOG(2) << "y image_shape(w,h):" << image_shape["width"] << " "
            << image_shape["height"];
    VLOG(2) << "out_C:" << out_C;
    VLOG(2) << "out_H:" << out_H;
    VLOG(2) << "out_W:" << out_W;
    VLOG(2) << "Stride2:" << Stride2;
    VLOG(2) << "Stride1:" << Stride1;
    VLOG(2) << "Stride0:" << Stride0;
    VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3]
            << " " << (new_dims[0] * new_dims[2]);
#endif

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);

    cl_int status;
    status = kernel_->setArg(0, *x_data);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(1, *y_data);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(2, out_H);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(3, out_W);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(4, out_C);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(5, Stride0);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(6, Stride1);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(7, Stride2);
    CL_CHECK_FATAL(status);

    auto global_work_size =
        cl::NDRange{static_cast<cl::size_type>((new_dims[1] + 3) / 4),
                    static_cast<cl::size_type>(new_dims[3]),
                    static_cast<cl::size_type>(new_dims[0] * new_dims[2])};
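    // One work-item per output texel: dim 0 walks the ceil(C / 4) channel
    // blocks, dim 1 the width, dim 2 the N * H image rows.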
    status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
        *(kernel_.get()),
        cl::NullRange,
        global_work_size,
        cl::NullRange,
        nullptr,
        event_.get());
    CL_CHECK_FATAL(status);
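    // Key the completion event by the output image so downstream consumers
    // of y can block on cl_wait_list until this copy finishes.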
    context.cl_wait_list()->emplace(y_data, event_);
  }

  std::string doc() const override {
    return "Trans Layout from cl::Buffer(NCHW) to "
           "cl::Image2D(ImageDefault/RGBA), Float ---> FP16";
  }

 private:
  std::string kernel_func_name_{"buffer_to_image2d"};
  std::string build_options_{"-DCL_DTYPE_float"};
  std::shared_ptr<cl::Event> event_{new cl::Event};
  std::shared_ptr<cl::Kernel> kernel_;
};

// [ImageDefault] -> [NCHW]
class LayoutComputeImageDefaultToBufferChw
    : public KernelLite<TARGET(kOpenCL), PRECISION(kAny), DATALAYOUT(kNCHW)> {
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
    auto& param = Param<param_t>();
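    // Mirror of pre255 above: the "_with_post255" variant presumably rescales
    // by 255 when writing the uint8 output buffer.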
    if (param.process_type == 1) {
      kernel_func_name_ = "image2d_to_buffer_with_post255";
    }
    VLOG(1) << "kernel_func_name_:" << kernel_func_name_;
    auto& context = ctx_->As<OpenCLContext>();
    kernel_ = context.cl_context()->CreateKernel(
        kernel_func_name_, "image/layout_kernel.cl", build_options_);
  }

  void Run() override {
    auto& param = Param<param_t>();
    const cl::Buffer* y_data;
    if (param.process_type == 1) {
      y_data = param.y->mutable_data<uint8_t, cl::Buffer>(TARGET(kOpenCL));
    } else {
      y_data = param.y->mutable_data<float, cl::Buffer>(TARGET(kOpenCL));
    }
    auto* x_data = param.x->data<half_t, cl::Image2D>();
    auto x_dims = param.x->dims();
    auto y_dims = param.y->dims();
    auto x_image_shape = InitImageDimInfoWith(x_dims);

    std::vector<size_t> new_dims = {1, 1, 1, 1};
    for (int j = 0; j < x_dims.size(); ++j) {
      new_dims[4 - x_dims.size() + j] = x_dims[j];
    }

#ifndef LITE_SHUTDOWN_LOG
    VLOG(2) << "param.process_type:" << param.process_type;
    VLOG(2) << "x_dims:" << x_dims;
    VLOG(2) << "param.x->memory_size():" << param.x->memory_size();
    VLOG(2) << "x_image_shape(w,h):" << x_image_shape["width"] << " "
            << x_image_shape["height"];
    VLOG(2) << "new_dims[" << new_dims.size() << "D]:" << new_dims[0] << " "
            << new_dims[1] << " " << new_dims[2] << " " << new_dims[3];
    VLOG(2) << "y_dims:" << y_dims;
    VLOG(2) << "param.y->memory_size():" << param.y->memory_size();
#endif

    const int C = new_dims[1];
    const int in_height = new_dims[2];
    const int in_width = new_dims[3];
    const int size_ch = in_height * in_width;
    const int size_block = size_ch * 4;
    const int size_batch = size_ch * C;
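    // size_ch is one channel plane (H * W elements), size_block the four
    // planes packed into one texel block, and size_batch one batch item
    // (C * H * W); the kernel presumably uses them to scatter each RGBA
    // texel back into up to four channel planes.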

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);

    cl_int status;
    status = kernel_->setArg(0, *x_data);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(1, in_width);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(2, in_height);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(3, *y_data);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(4, size_ch);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(5, size_block);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(6, size_batch);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(7, C);
    CL_CHECK_FATAL(status);

#ifndef LITE_SHUTDOWN_LOG
    VLOG(2) << "gws:[3D]" << ((new_dims[1] + 3) / 4) << " " << new_dims[3]
            << " " << (new_dims[0] * new_dims[2]);
#endif

    auto global_work_size =
        cl::NDRange{static_cast<cl::size_type>((new_dims[1] + 3) / 4),
                    static_cast<cl::size_type>(new_dims[3]),
                    static_cast<cl::size_type>(new_dims[0] * new_dims[2])};
    status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
        *(kernel_.get()),
        cl::NullRange,
        global_work_size,
        cl::NullRange,
        nullptr,
        event_.get());
    CL_CHECK_FATAL(status);
    context.cl_wait_list()->emplace(y_data, event_);
  }

  std::string doc() const override {
    return "Trans Layout from cl::Image2D(ImageDefault/RGBA) to "
           "cl::Buffer(NCHW), FP16 ---> Float";
  }

 private:
  std::string kernel_func_name_{"image2d_to_buffer"};
  std::string build_options_{"-DCL_DTYPE_float"};
  std::shared_ptr<cl::Event> event_{new cl::Event};
  std::shared_ptr<cl::Kernel> kernel_;
};

// [NCHW] -> [ImageNW]
class LayoutComputeBufferChwToImage2DNw
    : public KernelLite<TARGET(kOpenCL),
                        PRECISION(kFloat),
                        DATALAYOUT(kImageNW)> {
 public:
  using param_t = operators::LayoutParam;

  void PrepareForRun() override {
    auto& context = ctx_->As<OpenCLContext>();
    kernel_ = context.cl_context()->CreateKernel(
        kernel_func_name_, "buffer/layout_kernel.cl", build_options_);
  }

  void Run() override {
    auto& param = Param<param_t>();
    auto* x_data = param.x->data<float, cl::Buffer>();
    auto x_dims = param.x->dims();

    CHECK(x_dims.size() == 4) << "Tensor dim is not 4.";
    size_t image_width = x_dims[3] * ((x_dims[0] + 3) / 4);
    size_t image_height = x_dims[1] * x_dims[2];
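    // kImageNW packs the batch dimension into the texel's RGBA lanes instead
    // of the channels: width = W * ceil(N / 4), height = C * H.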

    auto* y_data =
        param.y->mutable_data<float, cl::Image2D>(image_width, image_height);
    auto y_dims = param.y->dims();

    // out info
    std::vector<size_t> new_dims = {1, 1, 1, 1};
    for (int tidx = 0; tidx < x_dims.size(); ++tidx) {
      new_dims[4 - x_dims.size() + tidx] = x_dims[tidx];
    }

    const int out_N = new_dims[0];
    const int out_C = new_dims[1];
    const int out_H = new_dims[2];
    const int out_W = new_dims[3];

    const int Stride2 = out_C * out_H * out_W;
    const int Stride1 = out_H * out_W;
    const int Stride0 = out_W;

    auto& context = ctx_->As<OpenCLContext>();
    CHECK(context.cl_context() != nullptr);

    cl_int status;
    status = kernel_->setArg(0, *x_data);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(1, *y_data);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(2, out_H);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(3, out_W);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(4, out_N);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(5, Stride0);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(6, Stride1);
    CL_CHECK_FATAL(status);
    status = kernel_->setArg(7, Stride2);
    CL_CHECK_FATAL(status);

#ifndef LITE_SHUTDOWN_LOG
    VLOG(2) << "gws:[3D]" << ((out_N + 3) / 4) << " " << out_W << " "
            << (out_C * out_H);
#endif

    auto global_work_size =
        cl::NDRange{static_cast<cl::size_type>((out_N + 3) / 4),  // N blocks
                    static_cast<cl::size_type>(out_W),            // w
                    static_cast<cl::size_type>(out_C * out_H)};   // ch
    status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
        *(kernel_.get()),
        cl::NullRange,
        global_work_size,
        cl::NullRange,
        nullptr,
        event_.get());
    CL_CHECK_FATAL(status);
    context.cl_wait_list()->emplace(y_data, event_);
  }

  std::string doc() const override {
    return "Trans Layout from cl::Buffer(NCHW) to cl::Image2D(ImageDW/CLNW)";
  }

 private:
  std::string kernel_func_name_{"buffer_to_image2d_nw"};
  std::string build_options_{"-DCL_DTYPE_float"};
  std::shared_ptr<cl::Event> event_{new cl::Event};
  std::shared_ptr<cl::Kernel> kernel_;
};

}  // namespace opencl
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

// [NCHW] -> [ImageDefault]
REGISTER_LITE_KERNEL(
    layout,
    kOpenCL,
    kAny,
    kImageDefault,
    paddle::lite::kernels::opencl::LayoutComputeBufferChwToImageDefault,
    NCHW_to_ImageDefault)
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
                                      PRECISION(kAny),
                                      DATALAYOUT(kNCHW))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
                                       PRECISION(kAny),
                                       DATALAYOUT(kImageDefault))})
    .Finalize();

// [ImageDefault] -> [NCHW]
REGISTER_LITE_KERNEL(
    layout,
    kOpenCL,
    kAny,
    kNCHW,
    paddle::lite::kernels::opencl::LayoutComputeImageDefaultToBufferChw,
    ImageDefault_to_NCHW)
    .BindInput("Input",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
                                      PRECISION(kAny),
                                      DATALAYOUT(kImageDefault))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
                                       PRECISION(kAny),
                                       DATALAYOUT(kNCHW))})
    .Finalize();