/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef SOFTMAX_OP

#include "operators/kernel/softmax_kernel.h"
#include "operators/kernel/central-arm-func/softmax_arm_func.h"

namespace paddle_mobile {
namespace operators {

template <>
bool SoftmaxKernel<FPGA, float>::Init(SoftmaxParam<FPGA> *param) {
  auto input = const_cast<LoDTensor *>(param->InputX());
  auto input_ptr = input->data<float>();
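  // Lay out the operator's output as an FP32 output feature map (OFM) in the
  // format the FPGA expects.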
  auto out = param->Out();
  fpga::format_fp32_ofm(out);

  auto float_input = new Tensor;
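  // float_input stages an FP32 copy of the FP16 FPGA input for the CPU
  // softmax. Shape it as {1, dims[1]} for 2-D inputs or {1, H, W, C} for 4-D.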
  if (input->dims().size() == 2) {
    float_input->mutable_data<float>({1, input->dims()[1]});
  } else if (input->dims().size() == 4) {
    float_input->mutable_data<float>(
        {1, input->dims()[2], input->dims()[3], input->dims()[1]});
  } else {
    DLOG << "wrong dimension of softmax input";
  }

  fpga::format_fp32_ofm(float_input);
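  // Configure a bypass pass that converts the FP16 HWC data in FPGA memory
  // into the FP32 staging tensor so the softmax itself can run on the CPU.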
  fpga::BypassArgs args = {fpga::DATA_TYPE_FP16};
  args.input_layout_type = fpga::LAYOUT_HWC;
  args.output_layout_type = fpga::LAYOUT_CHW;
  args.input_data_type = fpga::DATA_TYPE_FP16;
  args.output_data_type = fpga::DATA_TYPE_FP32;
  args.image.address = input_ptr;
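  // Describe the input geometry; non-4-D inputs are treated as 1x1 spatial
  // with dims()[1] channels.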
  args.image.height =
      (input->dims().size() == 4) ? (uint32_t)input->dims()[2] : 1;
  args.image.width =
      (input->dims().size() == 4) ? (uint32_t)input->dims()[3] : 1;
  args.image.channels = (uint32_t)input->dims()[1];
  args.output.address = float_input->data<float>();
  args.output.scale_address = float_input->scale;
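  // Keep the staging tensor and bypass arguments on the param so Compute()
  // can reuse them.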
  param->SetFloatInput(float_input);
  param->SetFpgaArgs(args);
  return true;
}

template <>
void SoftmaxKernel<FPGA, float>::Compute(const SoftmaxParam<FPGA> &param) {
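  // FloatInput() is the FP32 staging tensor created in Init(); the bypass
  // below fills it from the FP16 input before the CPU softmax runs.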
  Tensor *in_x = param.FloatInput();
  Tensor *out = param.Out();

  fpga::PerformBypass(param.FpgaArgs());
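  // Invalidate the CPU cache over the staging tensor so the CPU reads the
  // freshly converted FP32 data.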
  fpga::fpga_invalidate((void *)in_x->data<float>(),  // NOLINT
                        in_x->numel() * sizeof(float));
  // TODO: In the general case, 0 should be squeezed before the softmax input  // NOLINT
  math::SoftmaxFuntor<CPU, float>()(in_x, out);
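  // Flush the CPU-computed result back to memory so downstream consumers
  // (e.g. the next FPGA op) see it.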
  fpga::fpga_flush(out->data<float>(), out->memory_size());
}

}  // namespace operators
}  // namespace paddle_mobile

#endif  // SOFTMAX_OP