// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/kernels/opencl/elementwise_add_image_compute.h"
#include <memory>
#include "lite/backends/opencl/cl_include.h"
#include "lite/core/op_registry.h"
#include "lite/utils/replace_stl/stream.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace opencl {

26
void ElementwiseAddImageCompute::PrepareForRun() {}
27

28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53
void ElementwiseAddImageCompute::ReInitWhenNeeded() {
  ele_param_ = param_.get_mutable<param_t>();
  auto x_dims = ele_param_->X->dims();
  if ((!first_epoch_for_reinit_ && x_dims != last_x_dims_) ||
      first_epoch_for_reinit_) {
    last_x_dims_ = x_dims;
    first_epoch_for_reinit_ = false;

    // choose kernel
    auto* x = ele_param_->X;
    auto* y = ele_param_->Y;
    auto* out = ele_param_->Out;
    auto axis = ele_param_->axis;

    if (y->dims().size() == 4) {
      kernel_func_name_ = "elementwise_add";  // y: ImageDefault
    } else if (y->dims().size() == 1) {
      if (axis == x->dims().size() - 1) {
        kernel_func_name_ = "width_add";  // y: ImageDefault
      } else if (axis == x->dims().size() - 3) {
        kernel_func_name_ = "channel_add";  // y: ImageFolder
      } else {
        LOG(FATAL) << "ElementwiseAddImage doesn't support axis:" << axis
                   << ", x->dims().size():" << x->dims().size()
                   << ", y->dims.size():" << y->dims().size();
      }
54 55 56 57 58
    } else {
      LOG(FATAL) << "ElementwiseAddImage doesn't support axis:" << axis
                 << ", x->dims().size():" << x->dims().size()
                 << ", y->dims.size():" << y->dims().size();
    }
59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77
    VLOG(1) << "kernel_func_name_:" << kernel_func_name_;

    auto& context = ctx_->As<OpenCLContext>();
    context.cl_context()->AddKernel(
        kernel_func_name_, "image/elementwise_add_kernel.cl", build_options_);

    STL::stringstream kernel_key;
    kernel_key << kernel_func_name_ << build_options_;
    kernel_ = context.cl_context()->GetKernel(kernel_key.str());

    // compute image shape
    paddle::lite::CLImageConverterDefault default_convertor;
    x_img_shape_ = default_convertor.InitImageDimInfoWith(x->dims());  // w, h
    y_img_shape_ = default_convertor.InitImageDimInfoWith(y->dims());
    out_img_shape_ =
        default_convertor.InitImageDimInfoWith(out->dims());  // w, h

    // compute global work size
    GetGlobalWorkSize();
78
  }
79
}
80

81 82 83 84 85 86 87
void ElementwiseAddImageCompute::GetGlobalWorkSize() {
  global_work_size_ = cl::NDRange{static_cast<cl::size_type>(x_img_shape_[0]),
                                  static_cast<cl::size_type>(x_img_shape_[1])};
#ifndef LITE_SHUTDOWN_LOG
  VLOG(4) << "global_work_size:[2D]:" << x_img_shape_[0] << " "
          << x_img_shape_[1];
#endif
88 89 90 91 92 93 94
}

void ElementwiseAddImageCompute::Run() {
  auto* x = ele_param_->X;
  auto* y = ele_param_->Y;
  auto* out = ele_param_->Out;
  auto axis = ele_param_->axis;
95 96 97 98 99 100 101
  auto x_dims = x->dims();
  auto y_dims = y->dims();

  auto* x_img = x->data<half_t, cl::Image2D>();
  auto* y_img = y->data<half_t, cl::Image2D>();
  auto* out_img = out->mutable_data<half_t, cl::Image2D>(out_img_shape_[0],
                                                         out_img_shape_[1]);
102

103
#ifndef LITE_SHUTDOWN_LOG
104 105 106 107 108 109 110 111
  VLOG(4) << "x->target():" << TargetToStr(x->target());
  VLOG(4) << "y->target():" << TargetToStr(y->target());
  VLOG(4) << "out->target():" << TargetToStr(out->target());
  VLOG(4) << "x->dims():" << x->dims();
  VLOG(4) << "y->dims():" << y->dims();
  VLOG(4) << "out->dims():" << out->dims();
  VLOG(4) << "axis:" << axis;

112 113 114 115
  VLOG(4) << "x_img_shape_[w,h]:" << x_img_shape_[0] << " " << x_img_shape_[1];
  VLOG(4) << "y_img_shape_[w,h]:" << y_img_shape_[0] << " " << y_img_shape_[1];
  VLOG(4) << "out_img_shape_[w,h]:" << out_img_shape_[0] << " "
          << out_img_shape_[1];
116
#endif
117

118 119
  cl_int status;
  auto kernel = kernel_;
120
  if (y_dims.size() == 4) {
121
    status = kernel.setArg(0, *x_img);
122
    CL_CHECK_FATAL(status);
123
    status = kernel.setArg(1, *y_img);
124
    CL_CHECK_FATAL(status);
125
    status = kernel.setArg(2, *out_img);
126 127
    CL_CHECK_FATAL(status);
  } else if (y_dims.size() == 1) {
128 129
    if (axis == x_dims.size() - 1 || axis == x_dims.size() - 3) {
      const int tensor_w = x_dims[x_dims.size() - 1];
130
#ifndef LITE_SHUTDOWN_LOG
131
      VLOG(4) << "tensor_w:" << tensor_w;
132
#endif
133
      status = kernel.setArg(0, *x_img);
134
      CL_CHECK_FATAL(status);
135
      status = kernel.setArg(1, *y_img);
136
      CL_CHECK_FATAL(status);
137
      status = kernel.setArg(2, *out_img);
138
      CL_CHECK_FATAL(status);
139
      status = kernel.setArg(3, tensor_w);
140 141 142
      CL_CHECK_FATAL(status);
    } else {
      LOG(FATAL) << "ElementwiseAddImage doesn't support axis:" << axis
143 144
                 << ", x->dims().size():" << x_dims.size()
                 << ", y->dims.size():" << y_dims.size();
145 146 147
    }
  } else {
    LOG(FATAL) << "ElementwiseAddImage doesn't support axis:" << axis
148 149
               << ", x->dims().size():" << x_dims.size()
               << ", y->dims.size():" << y_dims.size();
150 151
  }

152 153 154
  auto& context = ctx_->As<OpenCLContext>();
  CHECK(context.cl_context() != nullptr);
  status = context.cl_context()->GetCommandQueue().enqueueNDRangeKernel(
155 156
      kernel,
      cl::NullRange,
157
      global_work_size_,
158 159 160 161 162 163
      cl::NullRange,
      nullptr,
      event_.get());
  CL_CHECK_FATAL(status);
  context.cl_wait_list()->emplace(out_img, event_);
}

}  // namespace opencl
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

namespace ocl = paddle::lite::kernels::opencl;
171

172
// TODO(ysh329): May need fix.
173 174 175 176 177 178 179
// "Y" may from constant value like conv bias (kARM, need do cl_image_converter
// on CPU);
//     may from anther branch like "X" (kOpenCL, nothing to do).
// Consider 2 situations have different actions when pass running(pick kernel),
//     set target of "Y" as kOpenCL temporarily.
REGISTER_LITE_KERNEL(elementwise_add,
                     kOpenCL,
180
                     kFP16,
181 182 183 184 185
                     kImageDefault,
                     ocl::ElementwiseAddImageCompute,
                     def)
    .BindInput("X",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
186
                                      PRECISION(kFP16),
187 188 189
                                      DATALAYOUT(kImageDefault))})
    .BindInput("Y",
               {LiteType::GetTensorTy(TARGET(kOpenCL),
190
                                      PRECISION(kFP16),
191 192 193
                                      DATALAYOUT(kImageDefault))})
    .BindOutput("Out",
                {LiteType::GetTensorTy(TARGET(kOpenCL),
194
                                       PRECISION(kFP16),
195
                                       DATALAYOUT(kImageDefault))})
Y
Yan Chunwei 已提交
196
    .Finalize();