fc_relu_kernel.cpp
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_FCRELU_OP
#include "operators/kernel/fc_relu_kernel.h"

namespace paddle_mobile {
namespace operators {

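// Init rearranges the FC weights, bias, and output buffers into the layouts
// the FPGA expects, then records the fused conv+ReLU arguments on the param
// so Compute only has to dispatch them.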
template <>
bool FusionFcReluKernel<FPGA, float>::Init(FusionFcReluParam<FPGA> *param) {
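  // ReLU is part of this fused op, so the activation is always enabled when
  // the conv arguments are filled below.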
  bool relu_enabled = true;
  auto input_x = const_cast<LoDTensor *>(param->InputX());
  auto filter = const_cast<Tensor *>(param->InputY());
  auto input_z = param->InputZ();
  auto input_z_ptr = input_z->data<float>();
  auto out = param->Out();
  PADDLE_MOBILE_ENFORCE(input_x->dims()[1] == filter->dims()[0],
                        "Image channel should be equal to weight number");
  int channel = (uint32_t)out->dims()[1];
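  // Pack per-channel bias and scale into one array for the FPGA:
  // entries [0, channel) take the bias values from InputZ,
  // entries [channel, 2 * channel) are set to a scale of 1.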
  auto bs_ptr =
      (float *)fpga::fpga_malloc(2 * channel * sizeof(float));  // NOLINT
  for (int i = 0; i < channel; i++) {
    bs_ptr[i + channel] = 1;
    bs_ptr[i] = input_z_ptr[i];
  }

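  // The FC weight tensor is laid out as (chw, num): chw is the flattened
  // input feature map size, num is the number of output neurons.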
  int num = (uint32_t)filter->dims()[1];
  int chw = (uint32_t)filter->dims()[0];
  PADDLE_MOBILE_ENFORCE(
      chw == input_x->numel(),
      "Filter element num should be equal to IFM element num");
  int height = (uint32_t)input_x->dims()[2];
  int width = (uint32_t)input_x->dims()[3];
  int filter_channel = chw / height / width;

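  // Reshape the 2-D FC weight into a 4-D conv filter covering the whole input
  // map, then quantize it against its max value and reorder it into the FPGA
  // filter layout.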
  filter->Resize(framework::make_ddim({num, filter_channel, height, width}));
  float max_value = fpga::filter_find_max(filter);
  fpga::format_filter(filter, max_value, 1);

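  // Interleave the bias/scale array to match how the filter is split into
  // hardware divisions, and lay out the output as fp16 in FPGA format.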
  int element_num_per_div = fpga::get_filter_num_per_div(filter, 1);
  fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
  fpga::format_fp16_ofm(out);

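  // Describe the FC as a convolution for the FPGA engine: the trailing
  // 1, 1, 1, 0, 0 arguments are group count, strides, and paddings, and
  // relu_enabled fuses the activation into the same pass.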
  fpga::WrapperConvArgs conv_arg = {0};
  fpga::fill_conv_arg(&conv_arg, input_x, out, filter, relu_enabled, 1, 1, 1, 0,
                      0, bs_ptr);
  param->SetFpgaArgs(conv_arg);
  return true;
}
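
// Compute simply hands the pre-built argument set to the FPGA convolution
// engine; all layout work was done once in Init.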
template <>
void FusionFcReluKernel<FPGA, float>::Compute(
    const FusionFcReluParam<FPGA> &param) const {
  fpga::ComputeFpgaConv(param.FpgaArgs());
}

}  // namespace operators
}  // namespace paddle_mobile
#endif