/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

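// Host-side helpers for the Paddle-Mobile FPGA V1 backend: reformatting image,
// filter and bias/scale tensors into the hardware's aligned layouts, and
// filling the driver argument structs for conv, deconv, depthwise conv and
// element-wise add operations.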
#include "fpga/V1/api.h"
#include "fpga/V1/bias_scale.h"
#include "fpga/V1/deconv_filter.h"
#include "fpga/V1/filter.h"
#include "fpga/V1/image.h"

namespace paddle_mobile {
namespace fpga {

#define USE_RELU 1
#define USE_BIAS 2

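// Reformat an input image tensor (NCHW float) into the FPGA's aligned layout.
// If the tensor carries an external data pointer, that buffer is formatted
// instead of the tensor's own storage; the tensor is re-pointed to the
// formatted buffer whenever the pointer changes.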
void format_image(framework::Tensor *image_tensor) {
  auto dims = image_tensor->dims();
  auto channel = dims[1], height = dims[2], width = dims[3];
  auto data_ptr = image_tensor->data<float>();
  auto external_ptr = reinterpret_cast<float *>(image_tensor->external_data);
  float *p_data = external_ptr == nullptr ? data_ptr : external_ptr;

  image::format_image(&p_data, channel, height, width);
  if (p_data != data_ptr) {
    image_tensor->reset_data_ptr(p_data);
  }
}

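// Allocate and zero an aligned output feature map (ofm) buffer for a tensor.
// format_ofm() dispatches on the element type; the fp16/fp32 variants compute
// the aligned size from the dims (4-D NCHW or 2-D) and replace the tensor's
// data pointer with the freshly allocated FPGA buffer.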
void format_ofm(framework::Tensor *ofm_tensor) {
  if (ofm_tensor->type() == typeid(float)) {
    format_fp32_ofm(ofm_tensor);
  } else {
    format_fp16_ofm(ofm_tensor);
  }
}
void format_fp16_ofm(framework::Tensor *ofm_tensor) {
  auto dims = ofm_tensor->dims();
  size_t memory_size = 0;
  if (dims.size() == 4) {
    auto channel = dims[1], height = dims[2], width = dims[3], num = dims[0];
    memory_size = num * height * align_to_x(channel * width, IMAGE_ALIGNMENT) *
                  sizeof(half);
  } else if (dims.size() == 2) {
    memory_size = align_to_x(dims[1], IMAGE_ALIGNMENT) * sizeof(half);
  } else {
    DLOG << "Wrong ofm dimension";
  }
  auto p = fpga_malloc(memory_size);
  memset(p, 0, memory_size);
  ofm_tensor->reset_data_ptr(p);
  ofm_tensor->set_type(typeid(half));
}

void format_fp16_ofm(framework::Tensor *ofm_tensor, framework::DDim dims) {
  // auto dims = ofm_tensor->dims();
  size_t memory_size = 0;
  if (dims.size() == 4) {
    auto channel = dims[1], height = dims[2], width = dims[3];
    memory_size =
        height * align_to_x(channel * width, IMAGE_ALIGNMENT) * sizeof(half);
  } else if (dims.size() == 2) {
    memory_size = align_to_x(dims[1], IMAGE_ALIGNMENT) * sizeof(half);
  } else {
    DLOG << "Wrong ofm dimension";
  }
  auto p = fpga_malloc(memory_size);
  memset(p, 0, memory_size);
  ofm_tensor->reset_data_ptr(p);
  ofm_tensor->set_type(typeid(half));
}
void format_fp32_ofm(framework::Tensor *ofm_tensor) {
  auto dims = ofm_tensor->dims();
  size_t memory_size = 0;
  if (dims.size() == 4) {
    auto channel = dims[1], height = dims[2], width = dims[3];
    memory_size =
        height * align_to_x(channel * width, IMAGE_ALIGNMENT) * sizeof(float);
  } else if (dims.size() == 2) {
    memory_size = align_to_x(dims[1], IMAGE_ALIGNMENT) * sizeof(float);
  } else {
    DLOG << "Wrong ofm dimension";
  }
  auto p = fpga_malloc(memory_size);
  memset(p, 0, memory_size);
  ofm_tensor->reset_data_ptr(p);
  ofm_tensor->set_type(typeid(float));
}

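// Largest filter value reported by filter::find_max(); used below as the
// reference for the int8 quantization scale (scale = max / 127).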
float filter_find_max(framework::Tensor *filter_tensor) {
  auto filter_ptr = filter_tensor->data<float>();
  return filter::find_max(filter_ptr, filter_tensor->numel());
}

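// Number of pieces a conv filter has to be split into so that each piece fits
// the FPGA's per-division capacity for its chw size; the deconv variant below
// accounts for the stride-based sub-convolution decomposition.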
int get_plit_num(framework::Tensor *filter_tensor) {
  auto dims = filter_tensor->dims();
  auto chw = dims[1] * dims[2] * dims[3];
  auto num = dims[0];
  int div_capacity = filter::calc_division_capacity(chw);
  return filter::calc_split_num(num, div_capacity);
}
int get_deconv_plit_num(framework::Tensor *filter_tensor, int stride) {
  auto dims = filter_tensor->dims();
  auto chw = dims[1] * dims[2] / stride * dims[3] / stride;
  auto num = dims[0] * stride;
  int div_capacity = filter::calc_division_capacity(chw);
  return filter::calc_split_num(num, div_capacity);
}

int get_filter_num_per_div(framework::Tensor *filter_tensor, int group_num) {
  auto dims = filter_tensor->dims();
  auto chw = dims[1] * dims[2] * dims[3];
  auto num = dims[0];
  int div_capacity = filter::calc_division_capacity(chw);
  return filter::calc_num_per_div(num, group_num, div_capacity);
}

int get_deconv_filter_num_per_div(framework::Tensor *filter_tensor,
                                  int group_num, int stride) {
  auto dims = filter_tensor->dims();
  auto chw = dims[1] * dims[2] / stride * dims[3] / stride;
  auto num = dims[0] * stride;
  int div_capacity = filter::calc_division_capacity(chw);
  return filter::calc_num_per_div(num, group_num, div_capacity);
}

int get_aligned_filter_element_num(int chw) {
  return align_to_x(chw, FILTER_ELEMENT_ALIGNMENT);
}

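// Quantize and rearrange a conv filter for the FPGA: record the float<->int8
// scale pair on the tensor, copy the weights into an FPGA buffer, let
// filter::format_filter() reorder them, and mark the tensor as int8.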
void format_filter(framework::Tensor *filter_tensor, float max_value,
                   int group_num) {
  filter_tensor->scale[0] = float(max_value / 127.0);  // NOLINT
  filter_tensor->scale[1] = float(127.0 / max_value);  // NOLINT
  auto dims = filter_tensor->dims();
  auto num = dims[0], channel = dims[1], height = dims[2], width = dims[3];
  auto data_ptr = filter_tensor->data<float>();
  size_t memory_size = num * channel * height * width * sizeof(float);
  auto new_data = (float *)fpga_malloc(memory_size);  // NOLINT
  fpga_copy(new_data, data_ptr, memory_size);
  filter::format_filter(&new_data, num, channel, height, width, group_num,
                        max_value);
  filter_tensor->reset_data_ptr(new_data);
  filter_tensor->set_type(typeid(int8_t));
}
void format_dwconv_filter(framework::Tensor *filter_tensor, float *scale_ptr) {
  auto dims = filter_tensor->dims();
  auto num = dims[0], height = dims[2], width = dims[3];
  auto data_ptr = filter_tensor->data<float>();
  size_t memory_size = num * height * width * sizeof(float);
  auto new_data = (float *)fpga_malloc(memory_size);  // NOLINT
  fpga_copy(new_data, data_ptr, memory_size);
  filter::format_dwconv_filter(&new_data, num, height, width, scale_ptr);
  filter_tensor->reset_data_ptr(new_data);
  filter_tensor->set_type(typeid(int8_t));
}

void format_DWDconv_filter(framework::Tensor *filter_tensor, float *scale_ptr,
                           int stride) {
  auto dims = filter_tensor->dims();
  auto num = dims[0], height = dims[2], width = dims[3];
  auto data_ptr = filter_tensor->data<float>();
  size_t memory_size = num * height * width * sizeof(float);
  auto new_data = (float *)fpga_malloc(memory_size);  // NOLINT
  fpga_copy(new_data, data_ptr, memory_size);

  int hw = height * width;
  deconv_filter::deconv_NC_convert(&new_data, num, 1, hw);

  num = dims[1];
  int channel = dims[0];

  deconv_filter::DWDconv_format_filter(&new_data, num, channel, height, width,
                                       scale_ptr, stride);

  //  framework::DDim dims_new =
  //      framework::make_ddim({num, 1, height, width});
  //  filter_tensor->Resize(dims_new);
  filter_tensor->reset_data_ptr(new_data);
  filter_tensor->set_type(typeid(int8_t));
}

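// Same quantize-and-rearrange flow as format_filter(), but using the
// fully-connected layout (group number fixed to 1).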
void format_fc_filter(framework::Tensor *filter_tensor, float max_value) {
  filter_tensor->scale[0] = float(max_value / 127.0);  // NOLINT
  filter_tensor->scale[1] = float(127.0 / max_value);  // NOLINT
  auto dims = filter_tensor->dims();
  auto num = dims[0], channel = dims[1], height = dims[2], width = dims[3];
  auto data_ptr = filter_tensor->data<float>();
  size_t memory_size = num * channel * height * width * sizeof(float);
  auto new_data = (float *)fpga_malloc(memory_size);  // NOLINT
  fpga_copy(new_data, data_ptr, memory_size);
  filter::format_fc_filter(&new_data, num, channel, height, width, 1,
                           max_value);
  filter_tensor->reset_data_ptr(new_data);
  filter_tensor->set_type(typeid(int8_t));
}
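// Rearrange a deconv filter: convert the N/C layout, format it for the
// stride-based sub-convolutions, then resize the tensor to the swapped
// {num, channel} order and mark it as int8.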
void format_deconv_filter(framework::Tensor *filter_tensor, float max_value,
                          int group_num, int stride) {
  filter_tensor->scale[0] = float(max_value / 127.0);  // NOLINT
  filter_tensor->scale[1] = float(127.0 / max_value);  // NOLINT
  auto dims = filter_tensor->dims();
  auto num = dims[0], channel = dims[1], height = dims[2], width = dims[3];
  auto data_ptr = filter_tensor->data<float>();
  size_t memory_size = num * channel * height * width * sizeof(float);
  auto new_data = (float *)fpga_malloc(memory_size);  // NOLINT
  memcpy(new_data, data_ptr, memory_size);

  int hw = height * width;
  deconv_filter::deconv_NC_convert(&new_data, num, channel, hw);

  num = dims[1];
  channel = dims[0];
  deconv_filter::deconv_format_filter(
      &new_data, (int)num, (int)channel,          // NOLINT
      (int)height,                                // NOLINT
      (int)width, group_num, max_value, stride);  // NOLINT

  framework::DDim dims_new =
      framework::make_ddim({num, channel, height, width});
  filter_tensor->Resize(dims_new);
  filter_tensor->reset_data_ptr(new_data);
  filter_tensor->set_type(typeid(int8_t));
}

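// Thin wrappers over the bias_scale:: helpers that arrange bias and scale
// arrays into the layout the hardware expects.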
void format_bias_scale_array(float **bias_scale_array,
                             int element_num_per_division, int num) {
  bias_scale::format_bias_scale_array(bias_scale_array,
                                      element_num_per_division, num);
}
void format_bias_array(float **bias_array, int num) {
  bias_scale::format_bias_array(bias_array, num);
}

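// Allocate the fp16 output buffer for concatenating image_num images that
// share height and width, summing their channel counts with aligned rows.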
void format_concat_output(framework::Tensor *out, int height, int width,
                          int image_num, uint32_t *channel_num) {
  int sum_channel = 0, sum_cw = 0;
  for (int i = 0; i < image_num; i++) {
    sum_channel += channel_num[i];
  }

  sum_cw = align_to_x(width * sum_channel, IMAGE_ALIGNMENT);
  auto data_ptr = fpga_malloc(height * sum_cw * sizeof(half));
  auto ddim = framework::make_ddim({1, sum_channel, height, width});
  out->Resize(ddim);
  out->reset_data_ptr(data_ptr);
  out->set_type(typeid(half));
}
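// Convenience wrappers that prepare everything a (de)conv op needs in one go:
// quantize/format the filter, format the bias/scale array for the resulting
// split, and allocate the fp16 output buffer.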
void format_conv_data(framework::Tensor *filter_tensor,
                      framework::Tensor *ofm_tensor, float **bs_ptr,
                      int group) {
  float max_value = fpga::filter_find_max(filter_tensor);
  fpga::format_filter(filter_tensor, max_value, group);
  int element_num_per_div = fpga::get_filter_num_per_div(filter_tensor, group);
  fpga::format_bias_scale_array(bs_ptr, element_num_per_div,
                                ofm_tensor->dims()[1]);
  fpga::format_fp16_ofm(ofm_tensor);
}
void format_deconv_data(framework::Tensor *filter_tensor,
                        framework::Tensor *ofm_tensor, float **bs_ptr,
                        int group, int sub_conv_n) {
  int channel = ofm_tensor->dims()[1];
  float max_value = filter_find_max(filter_tensor);
  format_deconv_filter(filter_tensor, max_value, group, sub_conv_n);
  int element_num_per_div =
      get_deconv_filter_num_per_div(filter_tensor, group, sub_conv_n);
  format_bias_scale_array(bs_ptr, element_num_per_div, channel * sub_conv_n);
  format_fp16_ofm(ofm_tensor);
}

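// Depthwise counterparts of the data-preparation wrappers above.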
void format_dwconv_data(framework::Tensor *filter_tensor,
                        framework::Tensor *ofm_tensor, float *scale_ptr,
                        float **bias_ptr) {
  auto channel = ofm_tensor->dims()[1];
  format_dwconv_filter(filter_tensor, scale_ptr);
  format_bias_array(bias_ptr, channel);
  format_fp16_ofm(ofm_tensor);
}
void format_DWDeconv_data(framework::Tensor *filter_tensor,
                          framework::Tensor *ofm_tensor, float **bs_ptr,
                          int group, int sub_conv_n) {
  int channel = ofm_tensor->dims()[1];
  // dw-deconv
  format_DWDconv_filter(
      filter_tensor,
      (reinterpret_cast<float *>(*bs_ptr) + sub_conv_n * channel), sub_conv_n);
  format_bias_array(bs_ptr, channel);
  format_fp16_ofm(ofm_tensor);
}
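// Derive the driver-level fields of a ConvArgs from its high-level parameters:
// output geometry, per-row amounts, a block partition of the output width that
// keeps each block within the on-chip window limit, and the packed deconv
// parameter word. The results are written into arg->driver.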
void expand_conv_arg(ConvArgs *arg) {
  ConvArgs args = *arg;

  auto fpga_bias_scale_len =
      align_to_x(args.filter_num / args.group_num, 8) * args.group_num;

  auto output_height =
      (args.image.height + args.image.pad_height * 2 - args.kernel.height) /
          args.kernel.stride_h +
      1;
  auto output_width =
      (args.image.width + args.image.pad_width * 2 - args.kernel.width) /
          args.kernel.stride_w +
      1;

  auto filter_per_group = args.filter_num / args.group_num;
  auto channel_per_group = args.image.channels / args.group_num;

  auto image_row_count = args.image.width * args.image.channels;
  auto image_amount_per_row = align_to_x(image_row_count, IMAGE_ALIGNMENT);
  auto image_one_pad_per_row = align_to_x(image_row_count, IMAGE_ALIGNMENT) +
                               args.image.pad_width * args.image.channels;
  auto filter_amount_all =
      align_to_x(args.kernel.height * args.kernel.width * channel_per_group,
                 FILTER_ELEMENT_ALIGNMENT);

  auto output_amount_per_row = align_to_x(
      (output_width - (args.deconv_tx_param.omit_size) * 2) * args.filter_num,
      IMAGE_ALIGNMENT);

  // Find the optimal partition strategy.
  uint64_t res_win;
  uint64_t res_fit = 0;
  for (res_win = 1; res_win <= output_width; res_win++) {
    if ((align_to_x(
             (args.image.channels *
              (args.kernel.width + (res_win - 1) * args.kernel.stride_w)),
             IMAGE_ALIGNMENT) /
             16 +
         1) *
            args.kernel.height >
        2048) {
      break;
    }
  }

  if (res_win != output_width) {
    res_win -= 1;
  }

  if (((res_win % 2) != 0) && (res_win != 1)) {
    res_win = res_win - 1;
  }
  res_fit = res_win;

  auto block_num = (output_width + res_fit - 1) / res_fit;
  auto block_len = res_fit;
  auto block_last = output_width - res_fit * (block_num - 1);

  auto res_amount_per_row =
      (output_width - (args.deconv_tx_param.omit_size) * 2) * args.filter_num;
  auto res_amount_per_row_pad = output_amount_per_row - res_amount_per_row;

  auto image_block_amount_per_row =
      args.kernel.stride_w * res_fit * args.image.channels;
  auto filter_pad_width_mul_channel =
      args.image.pad_width * args.image.channels;
  auto image_amount_per_row_multi_win_first =
      image_amount_per_row * (2 * args.kernel.stride_h - args.image.pad_height);
  auto image_amount_per_row_multi_win =
      image_amount_per_row * (2 * args.kernel.stride_h);

  auto image_block_num = block_num;
  auto image_block_len =
      align_to_x((args.image.channels *
                  (args.kernel.width + (block_len - 1) * args.kernel.stride_w)),
                 IMAGE_ALIGNMENT) /
          16 +
      1;
  auto image_block_len_last =
      align_to_x(
          (args.image.channels *
           (args.kernel.width + (block_last - 1) * args.kernel.stride_w)),
          IMAGE_ALIGNMENT) /
          16 +
      1;
  auto image_win_cnt = block_len;
  auto image_win_cnt_last = block_last;
  auto res_row_data_align4_pad = res_amount_per_row_pad / 8;
  auto prog_full_cnt = 1024 / (filter_amount_all / 16 * 2) - 1;
  if (prog_full_cnt == 511) {
    prog_full_cnt--;
  }
  auto post_prog_full_cnt =
      (512 / (align_to_x(args.filter_num, 4) / 4 * 2) > 2)
          ? (512 / (align_to_x(args.filter_num, 4) / 4 * 2) - 2)
          : 0;
  // auto cmd = 0UL | (args.relu_enabled ? USE_RELU : 0) | USE_BIAS;
  auto cmd = 0UL | USE_BIAS;

  auto deconv_param = ((args.deconv_tx_param.deconv_en) << 24) |
                      ((args.deconv_tx_param.sub_conv_num) << 16) |
                      ((args.deconv_tx_param.omit_size) << 0);
  (*arg).driver.image_address_phy = vaddr_to_paddr(args.image.address);
  (*arg).driver.sb_address_phy = vaddr_to_paddr(args.sb_address);
  (*arg).driver.filter_address_phy = vaddr_to_paddr(args.filter_address);
  (*arg).driver.output_address_phy = vaddr_to_paddr(args.output.address) +
                                     args.deconv_tx_param.out_addr_offset;
  (*arg).driver.output_height = output_height;
  (*arg).driver.output_width = output_width;
  (*arg).driver.filter_per_group = filter_per_group;
  (*arg).driver.channel_per_group = channel_per_group;
  (*arg).driver.image_amount_per_row = image_amount_per_row;
  (*arg).driver.image_one_pad_per_row = image_one_pad_per_row;
  (*arg).driver.filter_amount_all = filter_amount_all;
  (*arg).driver.output_amount_per_row = output_amount_per_row;
  (*arg).driver.image_block_amount_per_row = image_block_amount_per_row;
  (*arg).driver.filter_pad_width_mul_channel = filter_pad_width_mul_channel;
  (*arg).driver.image_amount_per_row_multi_win_first =
      image_amount_per_row_multi_win_first;
  (*arg).driver.image_amount_per_row_multi_win = image_amount_per_row_multi_win;
  (*arg).driver.image_block_num = image_block_num;
  (*arg).driver.image_block_len = image_block_len;
  (*arg).driver.image_block_len_last = image_block_len_last;
  (*arg).driver.image_win_cnt = image_win_cnt;
  (*arg).driver.image_win_cnt_last = image_win_cnt_last;
  (*arg).driver.res_row_data_align4_pad = res_row_data_align4_pad;
  (*arg).driver.prog_full_cnt = prog_full_cnt;
  (*arg).driver.post_prog_full_cnt = post_prog_full_cnt;
  (*arg).driver.fpga_bias_scale_len = fpga_bias_scale_len;
  (*arg).driver.cmd = cmd;
  (*arg).driver.deconv_param = deconv_param;
}  // expand_conv_arg()

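// Fill the driver-level fields of an element-wise add: physical addresses,
// packed image geometry, aligned row size and the const0/const1 coefficients.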
void expand_EW_arg(EWAddArgs *arg) {
  EWAddArgs args = *arg;
  // uint64_t cmd = args.relu_enabled ? USE_RELU : 0;
  uint64_t cmd = 0;
  uint64_t datalen = (uint64_t)args.image0.width *
                     (uint64_t)args.image0.height *
                     (uint64_t)args.image0.channels;
  uint64_t coefficient = (uint64_t)args.const0 << 32 | (uint64_t)args.const1;
  uint64_t image0_address_phy = vaddr_to_paddr(args.image0.address);
  uint64_t image1_address_phy = vaddr_to_paddr(args.image1.address);
  uint64_t output_address_phy = vaddr_to_paddr(args.output.address);

  uint64_t image_amount_per_row =
      align_to_x((uint64_t)args.image0.width * (uint64_t)args.image0.channels,
                 IMAGE_ALIGNMENT);
  uint64_t image_image_pixel = ((uint64_t)args.image0.channels << 32) |
                               ((uint64_t)args.image0.width << 16) |
                               (uint64_t)args.image0.height;

  (*arg).driver.image0_address_phy = image0_address_phy;
  (*arg).driver.image1_address_phy = image1_address_phy;
  (*arg).driver.datalen = datalen;
  (*arg).driver.image_image_pixel = image_image_pixel;
  (*arg).driver.image_amount_per_row = image_amount_per_row;
  (*arg).driver.output_address_phy = output_address_phy;
  (*arg).driver.coefficient = coefficient;
  (*arg).driver.cmd = cmd;
}  // expand_EW_arg

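// Build a SplitConvArgs for a convolution that may need several FPGA passes
// (split either by group or by filter count). Each split receives its own
// filter and bias/scale slices and, when there is more than one split, its own
// intermediate output that is concatenated afterwards.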
void fill_split_arg(struct SplitConvArgs *arg, framework::Tensor *input,
                    framework::Tensor *out, framework::Tensor *filter,
                    ActivationType activation_enable,
                    int16_t leaky_relu_negative_slope, int group_num,
                    int stride_h, int stride_w, int padding_h, int padding_w,
                    float *bs_ptr) {
  auto input_ptr = input->data<half>();
  auto filter_ptr = filter->data<int8_t>();
  auto out_ptr = out->data<half>();
  auto deleter = [](void *p) { fpga_free(p); };

  arg->group_num = (uint32_t)group_num;
  // Either group_num or split_num must be 1.
  arg->split_num = group_num == 1 ? (uint32_t)get_plit_num(filter) : 1;
  arg->filter_num = (uint32_t)filter->dims()[0];
  arg->output.address = out_ptr;
  arg->output.scale_address = out->scale;
  arg->conv_arg =
      (ConvArgs *)fpga_malloc(arg->split_num * sizeof(ConvArgs));  // NOLINT

  arg->shared_conv_arg = std::shared_ptr<ConvArgs>(arg->conv_arg, deleter);

  memset(arg->conv_arg, 0, arg->split_num * sizeof(struct ConvArgs));

  arg->concat_arg.image_num = arg->split_num;
  arg->concat_arg.image_out = out_ptr;
  arg->concat_arg.scale_out = out->scale;
  arg->concat_arg.height = (uint32_t)out->dims()[2];
  arg->concat_arg.width = (uint32_t)out->dims()[3];

  int n = arg->split_num;
  arg->concat_arg.images_in =
      static_cast<int16_t **>(fpga_malloc(n * sizeof(int *)));
  arg->concat_arg.scales_in =
      static_cast<float **>(fpga_malloc(n * sizeof(float *)));
  arg->concat_arg.channel_num =
      static_cast<uint32_t *>(fpga_malloc(n * sizeof(uint32_t)));
  arg->vector_concat_space.push_back(std::shared_ptr<char>(
      reinterpret_cast<char *>(arg->concat_arg.images_in), deleter));
  arg->vector_concat_space.push_back(std::shared_ptr<char>(
      reinterpret_cast<char *>(arg->concat_arg.scales_in), deleter));
  arg->vector_concat_space.push_back(std::shared_ptr<char>(
      reinterpret_cast<char *>(arg->concat_arg.channel_num), deleter));

  auto channel = (int)out->dims()[1];  // NOLINT
  int filter_num_per_div = get_filter_num_per_div(filter, group_num);
  int element_num = get_aligned_filter_element_num(
      (int)(filter->dims()[1] * filter->dims()[2] *  // NOLINT
            filter->dims()[3]));

  for (int i = 0; i < n; i++) {
    // arg->conv_arg[i].relu_enabled = relu_enabled;
    arg->conv_arg[i].output.activation.activation_type = activation_enable;
    arg->conv_arg[i].output.activation.leaky_relu_negative_slope =
        leaky_relu_negative_slope;
    arg->conv_arg[i].group_num = (uint32_t)group_num;
    arg->conv_arg[i].kernel.stride_h = (uint32_t)stride_h;
    arg->conv_arg[i].kernel.stride_w = (uint32_t)stride_w;
    arg->conv_arg[i].kernel.height = (uint32_t)filter->dims()[2];
    arg->conv_arg[i].kernel.width = (uint32_t)filter->dims()[3];
    arg->conv_arg[i].image.address = input_ptr;
    arg->conv_arg[i].image.channels = (uint32_t)input->dims()[1];
    arg->conv_arg[i].image.height = (uint32_t)input->dims()[2];
    arg->conv_arg[i].image.width = (uint32_t)input->dims()[3];
    arg->conv_arg[i].image.scale_address = input->scale;
    arg->conv_arg[i].image.pad_height = (uint32_t)padding_h;
    arg->conv_arg[i].image.pad_width = (uint32_t)padding_w;
    arg->conv_arg[i].filter_scale_address = filter->scale;
    arg->conv_arg[i].filter_num = (uint32_t)(
        i == n - 1 ? channel - (n - 1) * filter_num_per_div  // NOLINT
                   : filter_num_per_div);

    size_t filter_size =
        element_num *
        align_to_x(arg->conv_arg[i].filter_num, FILTER_NUM_ALIGNMENT) *
        sizeof(int8_t);
    auto filter_head = &(
        (int8_t *)filter_ptr)[i * element_num * filter_num_per_div];  // NOLINT
    arg->conv_arg[i].filter_address = fpga_malloc(filter_size);
    arg->vector_conv_space.push_back(std::shared_ptr<char>(
        reinterpret_cast<char *>(arg->conv_arg[i].filter_address), deleter));
    memcpy(arg->conv_arg[i].filter_address, filter_head, filter_size);
    fpga_flush(arg->conv_arg[i].filter_address, filter_size);

    size_t bs_size = 2 *
                     align_to_x(arg->conv_arg[i].filter_num, BS_NUM_ALIGNMENT) *
                     sizeof(float);
    auto bs_head = &bs_ptr[i * filter_num_per_div * 2];
    arg->conv_arg[i].sb_address = fpga_malloc(bs_size);
    arg->vector_conv_space.push_back(std::shared_ptr<char>(
        reinterpret_cast<char *>(arg->conv_arg[i].sb_address), deleter));
    memcpy(arg->conv_arg[i].sb_address, bs_head, bs_size);
    fpga_flush(arg->conv_arg[i].sb_address, bs_size);

    if (n > 1) {
      arg->conv_arg[i].output.scale_address =
          static_cast<float *>(fpga_malloc(2 * sizeof(float)));
      arg->conv_arg[i].output.address =
          fpga_malloc(out->dims()[2] *
                      align_to_x((int)(out->dims()[3] *  // NOLINT
                                       arg->conv_arg[i].filter_num),
                                 IMAGE_ALIGNMENT) *
                      sizeof(half));
      arg->vector_conv_space.push_back(std::shared_ptr<char>(
          reinterpret_cast<char *>(arg->conv_arg[i].output.scale_address),
          deleter));
      arg->vector_conv_space.push_back(std::shared_ptr<char>(
          reinterpret_cast<char *>(arg->conv_arg[i].output.address), deleter));
    } else {
      arg->conv_arg[i].output.scale_address = out->scale;
      arg->conv_arg[i].output.address = out_ptr;
    }

    arg->concat_arg.images_in[i] =
        (half *)arg->conv_arg[i].output.address;  // NOLINT
    arg->concat_arg.scales_in[i] = arg->conv_arg[i].output.scale_address;
    arg->concat_arg.channel_num[i] = arg->conv_arg[i].filter_num;

    expand_conv_arg(&arg->conv_arg[i]);
  }
  filter->reset_data_ptr(nullptr);
  fpga_free(bs_ptr);
}  // fill_split_arg

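// Build a DeconvArgs by decomposing the deconvolution into stride_h
// sub-convolutions; each sub-convolution is a SplitConvArgs filled much like
// in fill_split_arg(), writing into interleaved slices of the final output.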
void fill_deconv_arg(struct DeconvArgs *arg, framework::Tensor *input,
                     framework::Tensor *out, framework::Tensor *filter,
                     ActivationType activation_enable,
                     int16_t leaky_relu_negative_slope, int group_num,
                     int stride_h, int stride_w, int padding_h, int padding_w,
                     float *bs_ptr) {
  auto input_ptr = input->data<half>();
  auto filter_ptr = filter->data<int8_t>();
  auto deleter = [](void *p) { fpga_free(p); };

  arg->group_num = (uint32_t)group_num;
  arg->sub_conv_num = (uint32_t)stride_h;
  arg->filter_num = (uint32_t)filter->dims()[0];
  uint32_t sub_conv_num = arg->sub_conv_num;
  int sub_pad =
      deconv_filter::deconv_calc_sub_pad((int)filter->dims()[3],  // NOLINT
                                         padding_w, stride_w);
  auto sub_filter_width = (uint32_t)deconv_filter::deconv_get_sub_filter_axis(
      (int)filter->dims()[3], stride_w);  // NOLINT

  auto sub_output_width = (uint32_t)deconv_filter::deconv_get_sub_out_axis(
      (int)input->dims()[3], sub_pad, sub_filter_width);  // NOLINT
  auto sub_output_height = (uint32_t)deconv_filter::deconv_get_sub_out_axis(
      (int)input->dims()[2], sub_pad, sub_filter_width);  // NOLINT

  arg->sub_output_width = (uint32_t)sub_output_width;
  arg->sub_output_height = (uint32_t)sub_output_height;
  arg->omit_size = (uint32_t)deconv_filter::deconv_get_omit(
      stride_w, (int)filter->dims()[3], padding_w);  // NOLINT

  auto sub_channels = (int)input->dims()[1];  // NOLINT
  uint32_t omit_size = arg->omit_size;
  int real_out_width = sub_output_width * sub_conv_num - 2 * omit_size;
  int sub_filter_num = sub_conv_num * (arg->filter_num);

  framework::DDim dims_out_new = framework::make_ddim(
      {1, arg->filter_num, sub_output_height * sub_conv_num, real_out_width});
  fpga::format_fp16_ofm(out, dims_out_new);
  auto out_ptr = out->data<half>();
  arg->output.address =
      out_ptr +
      omit_size * sizeof(half) *
          (align_to_x(real_out_width * arg->filter_num, IMAGE_ALIGNMENT));
  arg->output.scale_address = out->scale;

  uint32_t conv_output_size =
      (align_to_x(sub_output_width * sub_filter_num, IMAGE_ALIGNMENT)) *
      sub_output_height;
  uint32_t split_num =
      group_num == 1 ? (uint32_t)get_deconv_plit_num(filter, sub_conv_num) : 1;

  for (int i = 0; i < sub_conv_num; ++i) {
    arg->split_conv_args.push_back(std::make_shared<SplitConvArgs>());
    arg->split_conv_args[i]->filter_num =
        (arg->sub_conv_num) * (arg->filter_num);
    arg->split_conv_args[i]->group_num = (uint32_t)group_num;
    arg->split_conv_args[i]->split_num = split_num;
    arg->split_conv_args[i]->concat_arg.height = sub_output_height;
    arg->split_conv_args[i]->concat_arg.width = sub_output_width;
    arg->split_conv_args[i]->concat_arg.image_num = split_num;

    arg->split_conv_args[i]->conv_arg =
        static_cast<ConvArgs *>(fpga_malloc(split_num * sizeof(ConvArgs)));
    arg->split_conv_args[i]->concat_arg.images_in =
        static_cast<int16_t **>(fpga_malloc(split_num * sizeof(int16_t *)));
    arg->split_conv_args[i]->concat_arg.scales_in =
        static_cast<float **>(fpga_malloc(split_num * sizeof(float *)));
    arg->split_conv_args[i]->concat_arg.channel_num =
        static_cast<uint32_t *>(fpga_malloc(split_num * sizeof(uint32_t)));
    arg->split_conv_args[i]->shared_conv_arg =
        std::shared_ptr<ConvArgs>(arg->split_conv_args[i]->conv_arg, deleter);
    arg->split_conv_args[i]->vector_concat_space.push_back(
        std::shared_ptr<char>(
            reinterpret_cast<char *>(
                arg->split_conv_args[i]->concat_arg.images_in),
            deleter));
    arg->split_conv_args[i]->vector_concat_space.push_back(
        std::shared_ptr<char>(
            reinterpret_cast<char *>(
                arg->split_conv_args[i]->concat_arg.scales_in),
            deleter));
    arg->split_conv_args[i]->vector_concat_space.push_back(
        std::shared_ptr<char>(
            reinterpret_cast<char *>(
                arg->split_conv_args[i]->concat_arg.channel_num),
            deleter));
  }

  auto filter_num_per_div =
      (uint32_t)get_deconv_filter_num_per_div(filter, group_num, stride_w);
  int element_num = get_aligned_filter_element_num(
      (int)(sub_channels * sub_filter_width * sub_filter_width));  // NOLINT

  int chw = sub_channels * sub_filter_width * sub_filter_width;
  int division_capacity = filter::calc_division_capacity(chw);
  int num_per_div_before_alignment =
      filter::calc_num_per_div(sub_filter_num, group_num, division_capacity);
  int num_per_div_after_alignment =
      align_to_x(num_per_div_before_alignment, FILTER_NUM_ALIGNMENT);
  int div_num = (sub_filter_num + num_per_div_before_alignment - 1) /
                num_per_div_before_alignment;
  int residual = sub_filter_num % num_per_div_before_alignment;
  int num_after_alignment = num_per_div_after_alignment *
                                ((residual == 0) ? div_num : (div_num - 1)) +
                            align_to_x(residual, FILTER_NUM_ALIGNMENT);

  int filter_sub_conv_offset = element_num * num_after_alignment;
  uint32_t out_addr_offset = 0;
  for (int i = 0; i < sub_conv_num; ++i) {
    if (sub_conv_num == 1) {
      arg->split_conv_args[i]->output.address = arg->output.address;
      arg->split_conv_args[i]->output.scale_address = arg->output.scale_address;
      out_addr_offset = 0;

    } else {
      out_addr_offset =
          sizeof(int16_t) * (sub_conv_num - 1 - i) *
          (align_to_x(real_out_width * arg->filter_num, IMAGE_ALIGNMENT));

      arg->split_conv_args[i]->output.address = out_ptr;
      arg->split_conv_args[i]->output.scale_address =
          static_cast<float *>(fpga_malloc(2 * sizeof(float)));
      arg->split_conv_args[i]->vector_conv_space.push_back(
          std::shared_ptr<char>(
              reinterpret_cast<char *>(
                  arg->split_conv_args[i]->output.scale_address),
              deleter));
    }

    for (int j = 0; j < split_num; ++j) {
      arg->split_conv_args[i]->conv_arg[j].output.activation.activation_type =
          activation_enable;
      arg->split_conv_args[i]
          ->conv_arg[j]
          .output.activation.leaky_relu_negative_slope =
          leaky_relu_negative_slope;
      arg->split_conv_args[i]->conv_arg[j].group_num = (uint32_t)group_num;

      arg->split_conv_args[i]->conv_arg[j].kernel.width =
          (uint32_t)sub_filter_width;
      arg->split_conv_args[i]->conv_arg[j].kernel.height =
          (uint32_t)sub_filter_width;
      arg->split_conv_args[i]->conv_arg[j].kernel.stride_w = 1;
      arg->split_conv_args[i]->conv_arg[j].kernel.stride_h = 1;

      arg->split_conv_args[i]->conv_arg[j].deconv_tx_param.deconv_en = 1;
      arg->split_conv_args[i]->conv_arg[j].deconv_tx_param.sub_conv_num =
          sub_conv_num;
      arg->split_conv_args[i]->conv_arg[j].deconv_tx_param.omit_size =
          omit_size;
      arg->split_conv_args[i]->conv_arg[j].deconv_tx_param.out_addr_offset =
          out_addr_offset;

      arg->split_conv_args[i]->conv_arg[j].image.scale_address = input->scale;
      arg->split_conv_args[i]->conv_arg[j].image.channels =
          (uint32_t)sub_channels;
      arg->split_conv_args[i]->conv_arg[j].image.width =
          (uint32_t)input->dims()[3];
      arg->split_conv_args[i]->conv_arg[j].image.height =
          (uint32_t)input->dims()[2];
      arg->split_conv_args[i]->conv_arg[j].image.pad_width = (uint32_t)sub_pad;
      arg->split_conv_args[i]->conv_arg[j].image.pad_height = (uint32_t)sub_pad;
      arg->split_conv_args[i]->conv_arg[j].image.address = input_ptr;

      arg->split_conv_args[i]->conv_arg[j].filter_scale_address = filter->scale;
      arg->split_conv_args[i]->conv_arg[j].filter_num =
          (uint32_t)(j == split_num - 1
                         ? sub_filter_num - (split_num - 1) * filter_num_per_div
                         : filter_num_per_div);

      size_t filter_size =
          element_num *
          align_to_x(arg->split_conv_args[i]->conv_arg[j].filter_num,
                     FILTER_NUM_ALIGNMENT) *
          sizeof(int8_t);
      auto filter_head =
          &filter_ptr[j * element_num * filter_num_per_div +  // NOLINT
                      i * filter_sub_conv_offset];
      arg->split_conv_args[i]->conv_arg[j].filter_address =
          fpga_malloc(filter_size);
      arg->split_conv_args[i]->vector_conv_space.push_back(
          std::shared_ptr<char>(
              reinterpret_cast<char *>(
                  arg->split_conv_args[i]->conv_arg[j].filter_address),
              deleter));

      memcpy(arg->split_conv_args[i]->conv_arg[j].filter_address, filter_head,
             filter_size);
      fpga_flush(arg->split_conv_args[i]->conv_arg[j].filter_address,
                 filter_size);

      size_t bs_align_num = align_to_x(
          arg->split_conv_args[i]->conv_arg[j].filter_num, BS_NUM_ALIGNMENT);
      size_t bs_size = 2 * bs_align_num * sizeof(float);
      auto bs_head = &bs_ptr[j * filter_num_per_div * 2];

      arg->split_conv_args[i]->conv_arg[j].sb_address = fpga_malloc(bs_size);
      arg->split_conv_args[i]->vector_conv_space.push_back(
          std::shared_ptr<char>(
              reinterpret_cast<char *>(
                  arg->split_conv_args[i]->conv_arg[j].sb_address),
              deleter));

      memcpy(arg->split_conv_args[i]->conv_arg[j].sb_address, bs_head, bs_size);
      fpga_flush(arg->split_conv_args[i]->conv_arg[j].sb_address, bs_size);

      if (split_num == 1) {
        arg->split_conv_args[i]->conv_arg[j].output.address =
            arg->split_conv_args[i]->output.address;
        arg->split_conv_args[i]->conv_arg[j].output.scale_address =
            arg->split_conv_args[i]->output.scale_address;
      } else {
        arg->split_conv_args[i]->conv_arg[j].output.address =
            fpga_malloc(conv_output_size * sizeof(int16_t));
        arg->split_conv_args[i]->conv_arg[j].output.scale_address =
            static_cast<float *>(fpga_malloc(2 * sizeof(float)));
        arg->split_conv_args[i]->vector_conv_space.push_back(
            std::shared_ptr<char>(
                reinterpret_cast<char *>(
                    arg->split_conv_args[i]->conv_arg[j].output.address),
                deleter));
        arg->split_conv_args[i]->vector_conv_space.push_back(
            std::shared_ptr<char>(
                reinterpret_cast<char *>(
                    arg->split_conv_args[i]->conv_arg[j].output.scale_address),
                deleter));
      }
      arg->split_conv_args[i]->concat_arg.images_in[j] = static_cast<half *>(
          arg->split_conv_args[i]->conv_arg[j].output.address);
      arg->split_conv_args[i]->concat_arg.scales_in[j] =
          arg->split_conv_args[i]->conv_arg[j].output.scale_address;
      arg->split_conv_args[i]->concat_arg.channel_num[j] =
          arg->split_conv_args[i]->conv_arg[j].filter_num;

      expand_conv_arg(&(arg->split_conv_args[i]->conv_arg[j]));
    }

    arg->split_conv_args[i]->concat_arg.image_out =
        arg->split_conv_args[i]->output.address;
    arg->split_conv_args[i]->concat_arg.scale_out =
        arg->split_conv_args[i]->output.scale_address;
  }
  filter->reset_data_ptr(nullptr);
  fpga_free(bs_ptr);
}  // fill_deconv_arg

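// Fill a DWconvArgs for a single depthwise convolution pass (no splitting).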
void fill_dwconv_arg(struct DWconvArgs *arg, framework::Tensor *input,
                     framework::Tensor *out, framework::Tensor *filter,
                     ActivationType activation_enable,
                     int16_t leaky_relu_negative_slope, int stride_h,
                     int stride_w, int padding_h, int padding_w,
                     float *bias_ptr) {
  auto deleter = [](void *p) { fpga_free(p); };
  arg->vector_dwconv_space.push_back(
      std::shared_ptr<char>(reinterpret_cast<char *>(bias_ptr), deleter));

  auto filter_ptr = filter->data<uint8_t>();
  auto input_ptr = input->data<half>();
  auto output_ptr = out->mutable_data<half>();
  arg->sub_conv_num = 1;
  // arg->relu_enabled = relu_enabled;
  arg->output.activation.activation_type = activation_enable;
  arg->output.activation.leaky_relu_negative_slope = leaky_relu_negative_slope;
  arg->bias_address = bias_ptr;
  arg->filter_address = filter_ptr;
  arg->kernel.height = (uint32_t)filter->dims()[2];
  arg->kernel.width = (uint32_t)filter->dims()[3];
  arg->kernel.stride_h = (uint32_t)stride_h;
  arg->kernel.stride_w = (uint32_t)stride_w;
  arg->image.address = input_ptr;
  arg->image.channels = (uint32_t)input->dims()[1];
  arg->image.height = (uint32_t)input->dims()[2];
  arg->image.width = (uint32_t)input->dims()[3];
  arg->image.pad_height = (uint32_t)padding_h;
  arg->image.pad_width = (uint32_t)padding_w;
  arg->image.scale_address = input->scale;
  arg->output.address = output_ptr;
  arg->output.scale_address = out->scale;
}  // end dwconv arg fill

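// Build a DWDeconvArgs: the depthwise deconvolution is decomposed into
// stride_w sub depthwise convolutions, each with its own filter slice and its
// own intermediate output buffer.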
void fill_DWDeconv_arg(struct DWDeconvArgs *arg, framework::Tensor *input,
                       framework::Tensor *out, framework::Tensor *filter,
                       ActivationType activation_enable,
                       int16_t leaky_relu_negative_slope, int stride_h,
                       int stride_w, int padding_h, int padding_w,
                       float *bias_ptr) {
  auto filter_ptr = filter->data<int8_t>();
  auto input_ptr = input->data<half>();

  auto deleter = [](void *p) { fpga_free(p); };

  arg->group_num = (uint32_t)filter->dims()[0];
  arg->sub_conv_num = (uint32_t)stride_w;
  arg->filter_num = (uint32_t)filter->dims()[0];

  int sub_conv_num = stride_w;

  int sub_pad =
      deconv_filter::deconv_calc_sub_pad((int)filter->dims()[3],  // NOLINT
                                         padding_w, stride_w);
  auto sub_filter_width = (uint32_t)deconv_filter::deconv_get_sub_filter_axis(
      (int)filter->dims()[3], stride_w);  // NOLINT

  auto sub_output_width = (uint32_t)deconv_filter::deconv_get_sub_out_axis(
      (int)input->dims()[3], sub_pad, sub_filter_width);  // NOLINT
  auto sub_output_height = (uint32_t)deconv_filter::deconv_get_sub_out_axis(
      (int)input->dims()[2], sub_pad, sub_filter_width);  // NOLINT

  arg->sub_output_width = (uint32_t)sub_output_width;
  arg->sub_output_height = (uint32_t)sub_output_height;
  arg->omit_size = (uint32_t)deconv_filter::deconv_get_omit(
      stride_w, (int)filter->dims()[3], padding_w);  // NOLINT

  auto sub_channels = (int)input->dims()[1];  // NOLINT
  uint32_t omit_size = arg->omit_size;
  int real_out_width = sub_output_width * sub_conv_num - 2 * omit_size;
  int real_out_height = sub_output_height * sub_conv_num - 2 * omit_size;
  int sub_filter_num = sub_conv_num * (arg->filter_num);

  framework::DDim dims_out_new = framework::make_ddim(
      {1, arg->filter_num, real_out_height, real_out_width});
  fpga::format_fp16_ofm(out, dims_out_new);
  auto out_ptr = out->data<half>();

  /*====For Addition
  arg->output.address =
      (half *)out_ptr +  // NOLINT
      omit_size * sizeof(half) *
          (align_to_x(real_out_width * arg->filter_num, IMAGE_ALIGNMENT));
          */
  arg->output.address = out_ptr;
  arg->output.scale_address = out->scale;

  int filter_offset = sub_filter_width * sub_filter_width *
                      align_to_x(sub_channels, FILTER_ELEMENT_ALIGNMENT) *
                      arg->sub_conv_num;

  for (int i = 0; i < sub_conv_num; ++i) {
    arg->dw_conv_args.push_back(std::make_shared<DWconvArgs>());

    arg->dw_conv_args[i]->sub_conv_num = sub_conv_num;
qnqinan's avatar
qnqinan 已提交
927 928 929 930
    // arg->dw_conv_args[i]->relu_enabled = relu_enabled;
    arg->dw_conv_args[i]->output.activation.activation_type = activation_enable;
    arg->dw_conv_args[i]->output.activation.leaky_relu_negative_slope =
        leaky_relu_negative_slope;
    arg->dw_conv_args[i]->bias_address = bias_ptr;

    arg->dw_conv_args[i]->filter_address =
        fpga_malloc(filter_offset * sizeof(int16_t));
    memcpy(arg->dw_conv_args[i]->filter_address,
           (reinterpret_cast<half *>(filter_ptr) + i * filter_offset),
           filter_offset * sizeof(int16_t));
    arg->vector_dw_conv_space.push_back(std::shared_ptr<char>(
        reinterpret_cast<char *>(arg->dw_conv_args[i]->filter_address),
        deleter));

    arg->dw_conv_args[i]->kernel.height = (uint32_t)sub_filter_width;
    arg->dw_conv_args[i]->kernel.width = (uint32_t)sub_filter_width;

    arg->dw_conv_args[i]->kernel.stride_h = (uint32_t)1;
    arg->dw_conv_args[i]->kernel.stride_w = (uint32_t)1;
    arg->dw_conv_args[i]->image.address = input_ptr;
    arg->dw_conv_args[i]->image.channels = (uint32_t)input->dims()[1];
    arg->dw_conv_args[i]->image.height = (uint32_t)input->dims()[2];
    arg->dw_conv_args[i]->image.width = (uint32_t)input->dims()[3];

    arg->dw_conv_args[i]->image.pad_height = sub_pad;
    arg->dw_conv_args[i]->image.pad_width = sub_pad;
    arg->dw_conv_args[i]->image.scale_address = input->scale;

    arg->dw_conv_args[i]->output.address =
        fpga_malloc(sub_output_height *
                    align_to_x(sub_output_width * sub_channels * sub_conv_num,
                               IMAGE_ALIGNMENT) *
                    sizeof(int16_t));
    arg->dw_conv_args[i]->output.scale_address =
        static_cast<float *>(fpga_malloc(2 * sizeof(float)));
    arg->vector_dw_conv_space.push_back(std::shared_ptr<char>(  // NOLINT
        reinterpret_cast<char *>(arg->dw_conv_args[i]->output.address),
        deleter));
    arg->vector_dw_conv_space.push_back(std::shared_ptr<char>(  // NOLINT
        reinterpret_cast<char *>(arg->dw_conv_args[i]->output.scale_address),
        deleter));
  }

  // arg->output.scale_address = out->scale;
}  // end DWDeconv arg fill

}  // namespace fpga
}  // namespace paddle_mobile