conv_add_relu_kernel.cpp

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef FUSION_CONVADDRELU_OP

#include "operators/kernel/conv_add_relu_kernel.h"

namespace paddle_mobile {
namespace operators {

template <>
bool ConvAddReluKernel<FPGA, float>::Init(FusionConvAddReluParam<FPGA> *param) {
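  // ReLU is always fused on in this kernel; the flag is passed through to the
  // FPGA conv descriptor below.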
  bool relu_enabled = true;
  auto input = const_cast<Tensor *>(param->Input());
  const Tensor *bias = param->Bias();
  auto bias_ptr = bias->data<float>();
  auto filter = const_cast<Tensor *>(param->Filter());
  auto out = param->Output();

  PADDLE_MOBILE_ENFORCE(out->dims()[1] == bias->dims()[0],
                        "Output channel should be equal to bias number");
  int channel = out->dims()[1];
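  // Pack bias and scale into one buffer for the FPGA: bias values fill the
  // first `channel` entries, per-channel scales (fixed at 1) the second half.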
  auto bs_ptr = reinterpret_cast<float *>(
      fpga::fpga_malloc(2 * channel * sizeof(float)));
  for (int i = 0; i < channel; i++) {
    bs_ptr[i + channel] = 1;
    bs_ptr[i] = bias_ptr[i];
  }

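  // Reorder the filter into the FPGA layout; its max value sets the
  // quantization range.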
  float max_value = fpga::filter_find_max(filter);
  fpga::format_filter(filter, max_value, param->Groups());

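  // The bias/scale array must be padded out to the number of filters the FPGA
  // processes per division.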
  int element_num_per_div =
      fpga::get_filter_num_per_div(filter, param->Groups());
  fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);

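  // Format the output feature map tensor as fp16, the FPGA's output precision.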
  fpga::format_fp16_ofm(out);

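  // Fill in the FPGA convolution descriptor once at Init time and stash it on
  // the param; Compute() then only has to dispatch it.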
  fpga::WrapperConvArgs conv_arg = {0};
  fpga::fill_conv_arg(&conv_arg, input, out, filter, relu_enabled,
                      param->Groups(), param->Strides()[0], param->Strides()[1],
                      param->Paddings()[0], param->Paddings()[1], bs_ptr);
  param->SetFpgaArgs(conv_arg);
  return true;
}

template <>
void ConvAddReluKernel<FPGA, float>::Compute(
    const FusionConvAddReluParam<FPGA> &param) const {
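  // All arguments were prepared in Init(); launch the fused
  // conv + add + relu on the FPGA.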
  fpga::ComputeFpgaConv(param.FpgaArgs());
}

}  // namespace operators
}  // namespace paddle_mobile

#endif