/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "operators/kernel/fetch_kernel.h"
namespace paddle_mobile {
namespace operators {

// Init prepares the fetch: it formats the fp32 output tensor and caches
// the fp16-to-fp32 bypass arguments that Compute() reuses.
template <>
bool FetchKernel<FPGA, float>::Init(FetchParam<FPGA> *param) {
  auto input = const_cast<LoDTensor *>(param->InputX());
  int col = param->Col();
  DLOG << "col = " << col;
  auto output = &(param->Out()->at(col));
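  // fp32 inputs need no conversion, so there is nothing to set up.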
  if (input->type() == type_id<float>()) {
    return true;
  }
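  // The device produces fp16; allocate an fp32 output image in FPGA format.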
  output->init(type_id<float>().hash_code());
  output->Resize(input->dims());
  fpga::format_fp32_ofm(output);
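  // Derive C/H/W from the output dims; the batch dim is not used here.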
  int outC = 1;
  int outH = 1;
  int outW = 1;
  if (output->dims().size() == 4) {
    outC = output->dims()[1];
    outH = output->dims()[2];
    outW = output->dims()[3];
  } else {  // dims().size() == 2
    outC = output->dims()[1];
  }
  int unalignedCW = outC * outW;
  int alignedCW = fpga::align_to_x(unalignedCW, IMAGE_ALIGNMENT);
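  // If the FPGA pads each row to IMAGE_ALIGNMENT, stage an intermediate
  // buffer so Compute() can strip the padding afterwards.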
  if (alignedCW != unalignedCW) {
    param->aligned_out.Resize(input->dims());
    param->aligned_out.mutable_data<float>(input->dims());
    fpga::fpga_flush(param->aligned_out.data<float>(),
                     outH * unalignedCW * sizeof(float));
  }
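  // Describe the fp16 -> fp32 copy for the bypass engine and cache it on
  // the param so Compute() does not have to rebuild it.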
  fpga::BypassArgs args = {fpga::DATA_TYPE_FP16};

  args.input_data_type = fpga::DATA_TYPE_FP16;
  args.output_data_type = fpga::DATA_TYPE_FP32;
  args.input_layout_type = fpga::LAYOUT_CHW;
  args.output_layout_type = fpga::LAYOUT_HWC;
  args.image.address = input->data<half>();
  args.image.channels = (uint32_t)(input->fpga_data_num);
  args.image.height = 1;
  args.image.width = 1;
  args.image.pad_height = 0;
  args.image.pad_width = 0;
  args.output.address = output->data<float>();
  args.output.scale_address = output->scale;
  param->fpga_bypass_args = args;

  return true;
}
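// Compact rows that the FPGA padded to the alignment boundary: copy
// input_h rows of input_c * input_w valid floats from src into densely
// packed dst. Note: the literal 16 is expected to match the
// IMAGE_ALIGNMENT used by the callers below.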
void dealign(float *src, float *dst, int input_c, int input_h, int input_w) {
  int alignCW = paddle_mobile::fpga::align_to_x(input_c * input_w, 16);
  int dealignCW = input_c * input_w;
  for (int h = 0; h < input_h; ++h) {
    auto input_offset = h * alignCW;
    auto output_offset = h * dealignCW;
    memcpy((dst + output_offset), (src + input_offset),
           dealignCW * sizeof(float));
  }
}
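// Compute converts the fp16 FPGA result for column `col` to fp32,
// unpadding rows if the hardware aligned them.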
template <>
void FetchKernel<FPGA, float>::Compute(const FetchParam<FPGA> &param) {
  auto input = const_cast<LoDTensor *>(param.InputX());
  int col = param.Col();
  auto output = &param.Out()->at(col);
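  // fp32 results can be aliased directly; no conversion is needed.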
  if (input->type() == type_id<float>()) {
    output->ShareDataWith(*input);
    return;
  }
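  // Re-point the cached bypass args at this run's input buffer.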

  fpga::BypassArgs args = param.fpga_bypass_args;
  auto input_address = (input->data<half>());
  args.image.address = static_cast<void *>(input_address);
  float *outdata_ptr =
      reinterpret_cast<float *>(param.fpga_bypass_args.output.address);
  const int num_th = 32;
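  // Outputs smaller than num_th elements are converted on the CPU instead
  // of launching the bypass.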
  if (output->fpga_data_num < num_th) {
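    // Make the device-written fp16 data visible to the CPU, then widen it
    // element by element.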
    fpga::fpga_invalidate(input_address, (input->fpga_data_num) * sizeof(half));

    for (int idx = 0; idx < product(input->dims()); ++idx) {
      outdata_ptr[idx] = fpga::fp16_2_fp32(input_address[idx]);
    }
    return;
  }

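  // Larger outputs: let the bypass engine perform the fp16 -> fp32 copy.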
  fpga::PerformBypass(args);
  int outC = 1;
  int outH = 1;
  int outW = 1;
  if (output->dims().size() == 4) {
    outC = output->dims()[1];
    outH = output->dims()[2];
    outW = output->dims()[3];
  } else {  // dims().size() == 2
    outC = output->dims()[1];
  }

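  // Invalidate the cache so the CPU sees the bypass results.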
  fpga::fpga_invalidate(param.fpga_bypass_args.output.address,
                        output->fpga_data_num * sizeof(float));
  int unalignedCW = outC * outW;
  int alignedCW = fpga::align_to_x(unalignedCW, IMAGE_ALIGNMENT);
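  // Strip per-row padding: compact into the staging buffer, copy back over
  // the output, then flush it to memory.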
  if (unalignedCW != alignedCW) {
    auto aligned_ptr = const_cast<float *>(param.aligned_out.data<float>());
    dealign(outdata_ptr, aligned_ptr, outC, outH, outW);
    memcpy(outdata_ptr, aligned_ptr, outC * outH * outW * sizeof(float));
    fpga::fpga_flush(outdata_ptr, outC * outH * outW * sizeof(float));
  }
}
template class FetchKernel<FPGA, float>;

}  // namespace operators
}  // namespace paddle_mobile