/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef SOFTMAX_OP

#include "operators/kernel/softmax_kernel.h"

namespace paddle_mobile {
namespace operators {

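// Adds the "softmax" kernel (softmax.cl) to the CL helper during initialization.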
template <>
bool SoftmaxKernel<GPU_CL, float>::Init(SoftmaxParam<GPU_CL> *param) {
  this->cl_helper_.AddKernel("softmax", "softmax.cl");
  return true;
}

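// Sets the input/output image arguments and the group size, then enqueues the
// kernel with the default NDRange derived from the output image.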
template <>
void SoftmaxKernel<GPU_CL, float>::Compute(const SoftmaxParam<GPU_CL> &param) {
  auto kernel = this->cl_helper_.KernelAt(0);
  auto default_work_size = this->cl_helper_.DefaultWorkSize(*(param.Out()));
  const auto *input = param.InputX();
  auto *output = param.Out();
  auto inputImage = input->GetCLImage();
  auto outputImage = output->GetCLImage();

  DLOG << " softmax - output image dim " << output->ImageDims();
  DLOG << " softmax - output image tensor dim " << output->dims();

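  // The output image width is the only shape value passed to the kernel ("group").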
  int group = output->ImageWidth();

  cl_int status;

  status = clSetKernelArg(kernel, 0, sizeof(cl_mem), &inputImage);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 1, sizeof(cl_mem), &outputImage);
  CL_CHECK_ERRORS(status);

  status = clSetKernelArg(kernel, 2, sizeof(int), &group);
  CL_CHECK_ERRORS(status);

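  // Disabled: the block below set per-dimension kernel arguments from the input dims.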
//  const auto &inputDim = input->dims();
//
//  int dims[4] = {1, 1, 1, 1};
//
//  for (int i = 0; i < inputDim.size(); i++) {
//    dims[4 - inputDim.size() + i] = inputDim[i];
//  }
//
//  clSetKernelArg(kernel, 2, sizeof(int), &dims);
//  clSetKernelArg(kernel, 3, sizeof(int), &dims[1]);
//  clSetKernelArg(kernel, 4, sizeof(int), &dims[2]);
//  clSetKernelArg(kernel, 5, sizeof(int), &dims[3]);
  DLOG << "default_work_size:  " << default_work_size;

  status = clEnqueueNDRangeKernel(this->cl_helper_.CLCommandQueue(), kernel,
                                  default_work_size.size(), NULL,
                                  default_work_size.data(), NULL, 0, NULL,
                                  NULL);
  CL_CHECK_ERRORS(status);

}

template class SoftmaxKernel<GPU_CL, float>;

}  // namespace operators
}  // namespace paddle_mobile
#endif  // SOFTMAX_OP