diff --git a/src/fpga/V1/api.cpp b/src/fpga/V1/api.cpp
index a8540e72f3ad077ac4aa49e34b535675f04dcd16..9a408a8f2fbe3c600679ddb2e3eadb493f323165 100644
--- a/src/fpga/V1/api.cpp
+++ b/src/fpga/V1/api.cpp
@@ -346,9 +346,9 @@ void expand_conv_arg(ConvArgs *arg) {
   auto filter_pad_width_mul_channel =
       args.image.pad_width * args.image.channels;
   auto image_amount_per_row_multi_win_first =
-      image_amount_per_row * (4 * args.kernel.stride_h - args.image.pad_height);
+      image_amount_per_row * (2 * args.kernel.stride_h - args.image.pad_height);
   auto image_amount_per_row_multi_win =
-      image_amount_per_row * (4 * args.kernel.stride_h);
+      image_amount_per_row * (2 * args.kernel.stride_h);
   auto image_block_num = block_num;
   auto image_block_len =
@@ -375,7 +375,8 @@ void expand_conv_arg(ConvArgs *arg) {
       (512 / (align_to_x(args.filter_num, 4) / 4 * 2) > 2)
           ? (512 / (align_to_x(args.filter_num, 4) / 4 * 2) - 2)
           : 0;
-  auto cmd = 0UL | (args.relu_enabled ? USE_RELU : 0) | USE_BIAS;
+  // auto cmd = 0UL | (args.relu_enabled ? USE_RELU : 0) | USE_BIAS;
+  auto cmd = 0UL | USE_BIAS;
   auto deconv_param = ((args.deconv_tx_param.deconv_en) << 24) |
                       ((args.deconv_tx_param.sub_conv_num) << 16) |
@@ -413,7 +414,8 @@ void expand_conv_arg(ConvArgs *arg) {
 void expand_EW_arg(EWAddArgs *arg) {
   EWAddArgs args = *arg;
-  uint64_t cmd = args.relu_enabled ? USE_RELU : 0;
+  // uint64_t cmd = args.relu_enabled ? USE_RELU : 0;
+  uint64_t cmd = 0;
   uint64_t datalen = (uint64_t)args.image0.width *
                      (uint64_t)args.image0.height *
                      (uint64_t)args.image0.channels;
@@ -441,8 +443,10 @@ void expand_EW_arg(EWAddArgs *arg) {
 void fill_split_arg(struct SplitConvArgs *arg, framework::Tensor *input,
                     framework::Tensor *out, framework::Tensor *filter,
-                    bool relu_enabled, int group_num, int stride_h,
-                    int stride_w, int padding_h, int padding_w, float *bs_ptr) {
+                    ActivationType activation_enable,
+                    int16_t leaky_relu_negative_slope, int group_num,
+                    int stride_h, int stride_w, int padding_h, int padding_w,
+                    float *bs_ptr) {
   auto input_ptr = input->data();
   auto filter_ptr = filter->data();
   auto out_ptr = out->data();
@@ -488,7 +492,10 @@ void fill_split_arg(struct SplitConvArgs *arg, framework::Tensor *input,
               filter->dims()[3]));
   for (int i = 0; i < n; i++) {
-    arg->conv_arg[i].relu_enabled = relu_enabled;
+    // arg->conv_arg[i].relu_enabled = relu_enabled;
+    arg->conv_arg[i].output.activation.activation_type = activation_enable;
+    arg->conv_arg[i].output.activation.leaky_relu_negative_slope =
+        leaky_relu_negative_slope;
     arg->conv_arg[i].group_num = (uint32_t)group_num;
     arg->conv_arg[i].kernel.stride_h = (uint32_t)stride_h;
     arg->conv_arg[i].kernel.stride_w = (uint32_t)stride_w;
@@ -560,8 +567,9 @@ void fill_split_arg(struct SplitConvArgs *arg, framework::Tensor *input,
 void fill_deconv_arg(struct DeconvArgs *arg, framework::Tensor *input,
                      framework::Tensor *out, framework::Tensor *filter,
-                     bool relu_enabled, int group_num, int stride_h,
-                     int stride_w, int padding_h, int padding_w,
+                     ActivationType activation_enable,
+                     int16_t leaky_relu_negative_slope, int group_num,
+                     int stride_h, int stride_w, int padding_h, int padding_w,
                      float *bs_ptr) {
   auto input_ptr = input->data();
   auto filter_ptr = filter->data();
@@ -687,7 +695,13 @@ void fill_deconv_arg(struct DeconvArgs *arg, framework::Tensor *input,
     }
     for (int j = 0; j < split_num; ++j) {
-      arg->split_conv_args[i]->conv_arg[j].relu_enabled = relu_enabled;
+      // arg->split_conv_args[i]->conv_arg[j].relu_enabled = relu_enabled;
+      arg->split_conv_args[i]->conv_arg[j].output.activation.activation_type =
+          activation_enable;
+      arg->split_conv_args[i]
+          ->conv_arg[j]
+          .output.activation.leaky_relu_negative_slope =
+          leaky_relu_negative_slope;
       arg->split_conv_args[i]->conv_arg[j].group_num = (uint32_t)group_num;
       arg->split_conv_args[i]->conv_arg[j].kernel.width =
@@ -800,13 +814,17 @@ void fill_deconv_arg(struct DeconvArgs *arg, framework::Tensor *input,
 void fill_dwconv_arg(struct DWconvArgs *arg, framework::Tensor *input,
                      framework::Tensor *out, framework::Tensor *filter,
-                     bool relu_enabled, int stride_h, int stride_w,
-                     int padding_h, int padding_w, float *bias_ptr) {
+                     ActivationType activation_enable,
+                     int16_t leaky_relu_negative_slope, int stride_h,
+                     int stride_w, int padding_h, int padding_w,
+                     float *bias_ptr) {
   auto filter_ptr = filter->data();
   auto input_ptr = input->data();
   auto output_ptr = out->mutable_data();
   arg->sub_conv_num = 1;
-  arg->relu_enabled = relu_enabled;
+  // arg->relu_enabled = relu_enabled;
+  arg->output.activation.activation_type = activation_enable;
+  arg->output.activation.leaky_relu_negative_slope = leaky_relu_negative_slope;
   arg->bias_address = bias_ptr;
   arg->filter_address = filter_ptr;
   arg->kernel.height = (uint32_t)filter->dims()[2];
@@ -826,8 +844,10 @@ void fill_dwconv_arg(struct DWconvArgs *arg, framework::Tensor *input,
 void fill_DWDeconv_arg(struct DWDeconvArgs *arg, framework::Tensor *input,
                        framework::Tensor *out, framework::Tensor *filter,
-                       bool relu_enabled, int stride_h, int stride_w,
-                       int padding_h, int padding_w, float *bias_ptr) {
+                       ActivationType activation_enable,
+                       int16_t leaky_relu_negative_slope, int stride_h,
+                       int stride_w, int padding_h, int padding_w,
+                       float *bias_ptr) {
   auto filter_ptr = filter->data();
   auto input_ptr = input->data();
   auto output_ptr = out->mutable_data();
@@ -884,7 +904,10 @@ void fill_DWDeconv_arg(struct DWDeconvArgs *arg, framework::Tensor *input,
     arg->dw_conv_args.push_back(std::make_shared<DWconvArgs>());
     arg->dw_conv_args[i]->sub_conv_num = sub_conv_num;
-    arg->dw_conv_args[i]->relu_enabled = relu_enabled;
+    // arg->dw_conv_args[i]->relu_enabled = relu_enabled;
+    arg->dw_conv_args[i]->output.activation.activation_type = activation_enable;
+    arg->dw_conv_args[i]->output.activation.leaky_relu_negative_slope =
+        leaky_relu_negative_slope;
     arg->dw_conv_args[i]->bias_address = bias_ptr;
     arg->dw_conv_args[i]->filter_address =
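
Note: every fill_* helper above now takes an ActivationType plus a leaky-relu slope instead of the old relu_enabled flag, and forwards both into the output's ActivationArgs. A minimal sketch of the new calling convention, assuming a conv whose operands (input, out, filter, bs_ptr) have already been formatted the way the kernel files further below do it:

    // Hypothetical call site; the real ones are in the V1 kernel diffs below.
    fpga::SplitConvArgs conv_arg = {0};
    paddle_mobile::fpga::ActivationType activation_enable =
        paddle_mobile::fpga::LEAKYRELU;      // or NONE / SIGMOID / TANH
    int16_t leaky_relu_negative_slope = 0;   // slope 0 degenerates to plain ReLU
    fpga::fill_split_arg(&conv_arg, input, out, filter, activation_enable,
                         leaky_relu_negative_slope, param->Groups(),
                         param->Strides()[0], param->Strides()[1],
                         param->Paddings()[0], param->Paddings()[1], bs_ptr);
    param->SetFpgaArgs(conv_arg);

This mirrors the fused-op kernels in this patch: ops with a trailing ReLU pass LEAKYRELU with a zero slope, everything else passes NONE.
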
diff --git a/src/fpga/V1/api.h b/src/fpga/V1/api.h
index 05d6a938c85f14770b97cd477580d0e6103fa777..05a30ddce4828bf8ac0f049ea0db4f18dc1dba79 100644
--- a/src/fpga/V1/api.h
+++ b/src/fpga/V1/api.h
@@ -47,20 +47,28 @@ void format_concat_output(framework::Tensor* out, int height, int width,
 void fill_split_arg(struct SplitConvArgs* arg, framework::Tensor* input,
                     framework::Tensor* out, framework::Tensor* filter,
-                    bool relu_enabled, int group_num, int stride_h,
-                    int stride_w, int padding_h, int padding_w, float* bs_ptr);
+                    ActivationType activation_enable,
+                    int16_t leaky_relu_negative_slope, int group_num,
+                    int stride_h, int stride_w, int padding_h, int padding_w,
+                    float* bs_ptr);
 void fill_deconv_arg(struct DeconvArgs* arg, framework::Tensor* input,
                      framework::Tensor* out, framework::Tensor* filter,
-                     bool relu_enabled, int group_num, int stride_h,
-                     int stride_w, int padding_h, int padding_w, float* bs_ptr);
+                     ActivationType activation_enable,
+                     int16_t leaky_relu_negative_slope, int group_num,
+                     int stride_h, int stride_w, int padding_h, int padding_w,
+                     float* bs_ptr);
 void fill_dwconv_arg(struct DWconvArgs* arg, framework::Tensor* input,
                      framework::Tensor* out, framework::Tensor* filter,
-                     bool relu_enabled, int stride_h, int stride_w,
-                     int padding_h, int padding_w, float* bias_ptr);
+                     ActivationType activation_enable,
+                     int16_t leaky_relu_negative_slope, int stride_h,
+                     int stride_w, int padding_h, int padding_w,
+                     float* bias_ptr);
 void fill_DWDeconv_arg(struct DWDeconvArgs* arg, framework::Tensor* input,
                        framework::Tensor* out, framework::Tensor* filter,
-                       bool relu_enabled, int stride_h, int stride_w,
-                       int padding_h, int padding_w, float* bs_ptr);
+                       ActivationType activation_enable,
+                       int16_t leaky_relu_negative_slope, int stride_h,
+                       int stride_w, int padding_h, int padding_w,
+                       float* bs_ptr);
 void format_deconv_filter(framework::Tensor* filter_tensor, float max_value,
                           int group_num, int stride);
diff --git a/src/fpga/V1/deconv_filter.cpp b/src/fpga/V1/deconv_filter.cpp
index 4c484a45d0a36db4aac677377ae11b5235603ac6..7c87452f5a7264ad069d8508cb1e9dc24f5cdc3d 100644
--- a/src/fpga/V1/deconv_filter.cpp
+++ b/src/fpga/V1/deconv_filter.cpp
@@ -19,7 +19,6 @@ limitations under the License. */
 #include "fpga/V1/filter.h"
 // #include "filter.h"
 #include "fpga/V1/api.h"
-// #include "fpga_api.h"
 namespace paddle_mobile {
 namespace fpga {
diff --git a/src/fpga/V1/pe.cpp b/src/fpga/V1/pe.cpp
index 16d3bc793389f49ad0b6e3bf3b064a880e4a927a..5a81e2422979f08b2113bd9b46022fe4d77154cb 100644
--- a/src/fpga/V1/pe.cpp
+++ b/src/fpga/V1/pe.cpp
@@ -63,6 +63,7 @@ using namespace std;  // NOLINT
 #define REG_TIMER_COUNTER 0x070
 #define REG_SCALE_PARAMETER 0x080
+#define REG_ACTIVATION_MODE_AND_LEAKY_RELU_FACTOR 0x090
 #define REG_FLASH_CMD 0x200
 #define REG_FLASH_DATA 0x208
@@ -189,8 +190,8 @@ int ComputeFpgaConv(const struct SplitConvArgs &args) {
 int ComputeBasicConv(const struct ConvArgs &args) {
 #ifdef FPGA_PRINT_MODE
   DLOG << "======Compute Basic Conv======";
-  DLOG << " relu_enabled:" << args.relu_enabled
-       << " sb_address:" << args.sb_address
+  // DLOG << " relu_enabled:" << args.relu_enabled
+  DLOG << " sb_address:" << args.sb_address
        << " filter_address:" << args.filter_address
        << " filter_num:" << args.filter_num
        << " group_num:" << args.group_num;
@@ -212,6 +213,25 @@ int ComputeBasicConv(const struct ConvArgs &args) {
 #ifdef PADDLE_MOBILE_ZU5
   int ret = 0;
   uint64_t output_scale = 0;
+
+  uint64_t reg_ActivationArgs = 0;
+  // active function: {none, leakyrelu, sigmoid, tanh}
+  ActivationArgs active_args;
+  // active_args.activation_type = LEAKYRELU;
+
+  active_args.activation_type = args.output.activation.activation_type;
+
+  active_args.leaky_relu_negative_slope =
+      args.output.activation.leaky_relu_negative_slope;
+
+  reg_ActivationArgs = (uint64_t(active_args.activation_type) << 32) |
+                       active_args.leaky_relu_negative_slope;
+
+  DLOG << " activation_type:" << active_args.activation_type
+       << " leaky_relu_negative_slope:"
+       << active_args.leaky_relu_negative_slope;
+  DLOG << " reg_ActivationArgs:" << reg_ActivationArgs;
+
   pthread_mutex_lock(&g_fpgainfo.pe_data->mutex);
   if (ERROR == g_fpgainfo.pe_data->pes[PE_IDX_CONV]->status) {
     ret = -EIO;
@@ -219,6 +239,10 @@ int ComputeBasicConv(const struct ConvArgs &args) {
     pthread_mutex_unlock(&g_fpgainfo.pe_data->mutex);
     return ret;
   }
+
+  reg_writeq(reg_ActivationArgs,
+             REG_ACTIVATION_MODE_AND_LEAKY_RELU_FACTOR);  // active function
+
   reg_writeq(output_scale, REG_SCALE_PARAMETER);
   reg_writeq(
       ((uint64_t)args.image.height) | (((uint64_t)args.image.width) << 32),
@@ -278,6 +302,9 @@ int ComputeBasicConv(const struct ConvArgs &args) {
   output_scale = (output_scale << 32) | (output_scale >> 32);
   fpga_copy(args.output.scale_address, &output_scale,
             sizeof(float) * 2);
+  reg_ActivationArgs = uint64_t(NONE) << 32;  // reset the PE to no activation
+  reg_writeq(reg_ActivationArgs, REG_ACTIVATION_MODE_AND_LEAKY_RELU_FACTOR);
+
   pthread_mutex_unlock(&g_fpgainfo.pe_data->mutex);
   return ret;
@@ -314,6 +341,23 @@ int ComputeFpgaPool(const struct PoolingArgs &args) {
   uint64_t image_physical_address = 0;
   uint64_t output_physical_address = 0;
+  uint64_t reg_ActivationArgs = 0;
+  // active function: {none, leakyrelu, sigmoid, tanh}
+  ActivationArgs active_args;
+  // active_args.activation_type = LEAKYRELU;
+  active_args.activation_type = args.output.activation.activation_type;
+
+  active_args.leaky_relu_negative_slope =
+      args.output.activation.leaky_relu_negative_slope;
+
+  reg_ActivationArgs = (uint64_t(active_args.activation_type) << 32) |
+                       active_args.leaky_relu_negative_slope;
+
+  DLOG << " activation_type:" << active_args.activation_type
+       << " leaky_relu_negative_slope:"
+       << active_args.leaky_relu_negative_slope;
+  DLOG << " reg_ActivationArgs:" << reg_ActivationArgs;
+
   image_physical_address = vaddr_to_paddr_driver(args.image.address);
   output_physical_address = vaddr_to_paddr_driver(args.output.address);
   uint32_t output_height = (uint32_t)(
@@ -364,6 +408,9 @@ int ComputeFpgaPool(const struct PoolingArgs &args) {
     return ret;
   }
+  reg_writeq(reg_ActivationArgs,
+             REG_ACTIVATION_MODE_AND_LEAKY_RELU_FACTOR);  // active function
+
   reg_writeq(output_scale, REG_SCALE_PARAMETER);
   reg_writeq(image_physical_address, REG_POOLING_IMAGE_BASE_ADDR);
   reg_writeq(output_physical_address, REG_POOLING_RESULT_BASE_ADDR);
@@ -408,6 +455,10 @@ int ComputeFpgaPool(const struct PoolingArgs &args) {
   output_scale = reg_readq(REG_SCALE_PARAMETER);
   output_scale = (output_scale << 32) | (output_scale >> 32);
   fpga_copy(args.output.scale_address, &output_scale, sizeof(float) * 2);
+
+  reg_ActivationArgs = uint64_t(NONE) << 32;  // reset the PE to no activation
+  reg_writeq(reg_ActivationArgs, REG_ACTIVATION_MODE_AND_LEAKY_RELU_FACTOR);
+
   pthread_mutex_unlock(&g_fpgainfo.pe_data->mutex);
   return ret;
@@ -418,8 +469,8 @@ int ComputeFpgaPool(const struct PoolingArgs &args) {
 int ComputeFpgaEWAdd(const struct EWAddArgs &args) {
 #ifdef FPGA_PRINT_MODE
   DLOG << "=============ComputeFpgaEWAdd===========";
-  DLOG << " relu_enabled:" << args.relu_enabled
-       << " const0:" << fp16_2_fp32(int16_t(args.const0))
+  // DLOG << " relu_enabled:" << args.relu_enabled
+  DLOG << " const0:" << fp16_2_fp32(int16_t(args.const0))
        << " const1:" << fp16_2_fp32(int16_t(args.const1));
   DLOG << " image0_address:" << args.image0.address
        << " image0_scale_address:" << args.image0.scale_address
@@ -441,6 +492,19 @@ int ComputeFpgaEWAdd(const struct EWAddArgs &args) {
 #ifdef PADDLE_MOBILE_ZU5
   int ret = 0;
   uint64_t output_scale = 0;
+
+  uint64_t reg_ActivationArgs = 0;
+  ActivationArgs active_args;
+  active_args.activation_type = args.output.activation.activation_type;
+  active_args.leaky_relu_negative_slope =
+      args.output.activation.leaky_relu_negative_slope;
+  reg_ActivationArgs = (uint64_t(active_args.activation_type) << 32) |
+                       active_args.leaky_relu_negative_slope;
+  DLOG << " activation_type:" << active_args.activation_type
+       << " leaky_relu_negative_slope:"
+       << active_args.leaky_relu_negative_slope;
+  DLOG << " reg_ActivationArgs:" << reg_ActivationArgs;
+
   pthread_mutex_lock(&g_fpgainfo.pe_data->mutex);
   if (ERROR == g_fpgainfo.pe_data->pes[PE_IDX_EW]->status) {
     ret = -EIO;
@@ -449,6 +513,9 @@ int ComputeFpgaEWAdd(const struct EWAddArgs &args) {
     return ret;
   }
+  reg_writeq(reg_ActivationArgs,
+             REG_ACTIVATION_MODE_AND_LEAKY_RELU_FACTOR);  // active function
+
   reg_writeq(output_scale, REG_SCALE_PARAMETER);
   reg_writeq(args.driver.image0_address_phy, REG_EW_IMAGE0_BASE_ADDR);
   reg_writeq(args.driver.image1_address_phy, REG_EW_IMAGE1_BASE_ADDR);
@@ -468,6 +535,9 @@ int ComputeFpgaEWAdd(const struct EWAddArgs &args) {
   output_scale = reg_readq(REG_SCALE_PARAMETER);
   output_scale = (output_scale << 32) | (output_scale >> 32);
   fpga_copy(args.output.scale_address, &output_scale, sizeof(float) * 2);
+  reg_ActivationArgs = uint64_t(NONE) << 32;  // reset the PE to no activation
+  reg_writeq(reg_ActivationArgs, REG_ACTIVATION_MODE_AND_LEAKY_RELU_FACTOR);
+
   pthread_mutex_unlock(&g_fpgainfo.pe_data->mutex);
   return ret;
 #endif
@@ -501,6 +571,17 @@ int PerformBypass(const struct BypassArgs &args) {
   uint8_t data_cell_in = 0;
   uint8_t data_cell_out = 0;
   int ret = 0;
+
+  uint64_t reg_ActivationArgs = 0;
+  ActivationArgs active_args;
+  active_args.activation_type = args.output.activation.activation_type;
+
+  active_args.leaky_relu_negative_slope =
+      args.output.activation.leaky_relu_negative_slope;
+
+  reg_ActivationArgs = (uint64_t(active_args.activation_type) << 32) |
+                       active_args.leaky_relu_negative_slope;
+
   datalen = (uint64_t)args.image.width * (uint64_t)args.image.height *
             (uint64_t)args.image.channels;
   datalen = align_to_x(datalen, 16);
@@ -559,7 +640,6 @@ int PerformBypass(const struct BypassArgs &args) {
       (data_cell_out != SIZE_FP16 && data_cell_out != SIZE_FP32)) {
     return -EFAULT;
   }
-
   pthread_mutex_lock(&g_fpgainfo.pe_data->mutex);
   if (ERROR == g_fpgainfo.pe_data->pes[PE_IDX_BYPASS]->status) {
     ret = -EIO;
@@ -567,7 +647,8 @@ int PerformBypass(const struct BypassArgs &args) {
     pthread_mutex_unlock(&g_fpgainfo.pe_data->mutex);
     return ret;
   }
-
+  reg_writeq(reg_ActivationArgs,
+             REG_ACTIVATION_MODE_AND_LEAKY_RELU_FACTOR);  // active function
   reg_writeq(output_scale, REG_SCALE_PARAMETER);
   reg_writeq(input_address_phy, REG_CONVERT_SRC_ADDR);
   reg_writeq(output_address_phy, REG_CONVERT_DST_ADDR);
@@ -585,6 +666,7 @@ int PerformBypass(const struct BypassArgs &args) {
   output_scale = reg_readq(REG_SCALE_PARAMETER);
   output_scale = (output_scale << 32) | (output_scale >> 32);
   fpga_copy(args.output.scale_address, &output_scale, sizeof(float) * 2);
+  reg_writeq(reg_ActivationArgs, REG_ACTIVATION_MODE_AND_LEAKY_RELU_FACTOR);
   pthread_mutex_unlock(&g_fpgainfo.pe_data->mutex);
   return ret;
 #endif
@@ -808,7 +890,7 @@ int ComputeFPGASplit(const struct SplitArgs &args) {
 int ComputeDWConv(const struct DWconvArgs &args) {
 #ifdef FPGA_PRINT_MODE
   DLOG << "=============ComputeDWConv===========";
-  DLOG << " mode:" << args.relu_enabled;
+  // DLOG << " mode:" << args.relu_enabled;
   DLOG << " image_address:" << args.image.address
        << " image_scale_address:" << args.image.scale_address
        << " image_channels:" << args.image.channels
@@ -831,7 +913,8 @@ int ComputeDWConv(const struct DWconvArgs &args) {
   uint64_t output_scale = 0;
   uint64_t timer_cnt = 0;
   int ret = 0;
-  uint64_t cmd = args.relu_enabled;
+  // uint64_t cmd = args.relu_enabled;
+  uint64_t cmd = 0;
   uint64_t image_physical_address = 0;
   uint64_t output_physical_address = 0;
   uint64_t filter_physical_address = 0;
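
Note: all of the PEs above program the new 0x090 register the same way: the activation type goes in the upper 32 bits and the leaky-relu factor in the low 16 bits, written before kicking off the job and reset to NONE when done. A self-contained sketch of that packing (the helper names are illustrative, not part of the patch):

    #include <cstdint>

    enum ActivationType { NONE = 0, LEAKYRELU = 1, SIGMOID = 2, TANH = 3, SOFTMAX = 4 };

    // Same layout as reg_ActivationArgs in ComputeBasicConv/Pool/EWAdd/Bypass:
    // bits [63:32] hold the ActivationType, bits [15:0] the negative slope.
    inline uint64_t pack_activation(ActivationType type, int16_t slope) {
      // Mask the slope to 16 bits; OR-ing the raw int16_t (as the patch does)
      // would sign-extend a slope whose top bit is set into bits [31:16].
      return (uint64_t(type) << 32) | uint16_t(slope);
    }

    inline ActivationType unpack_activation_type(uint64_t reg) {
      return ActivationType(reg >> 32);
    }

pack_activation(NONE, 0) evaluates to 0, which is exactly what the post-compute reset writes back into the register.
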
diff --git a/src/fpga/common/driver.cpp b/src/fpga/common/driver.cpp
index 18a310b09cad4a741eb83453a09f3c94d4f0db05..b1d3559dbbb238ae24cc6224e2d253dab744dce1 100644
--- a/src/fpga/common/driver.cpp
+++ b/src/fpga/common/driver.cpp
@@ -154,7 +154,6 @@ int memory_request(struct fpga_memory *memory, size_t size, uint64_t *addr) {
   unsigned int nr = (unsigned int)_nr;
   int ret = 0;
   uint64_t a_size = FPGA_PAGE_SIZE * nr;
-  DLOG << a_size;
   pthread_mutex_lock(&memory->mutex);
@@ -391,9 +390,6 @@ int fpga_invalidate_driver(void *address, size_t size) {
 void fpga_copy_driver(void *dest, const void *src, size_t num) {
   uint64_t i;
-
-  DLOG << "dest:" << dest << " src:" << src << " size:" << num;
-
   for (i = 0; i < num; i++) {
     *((int8_t *)dest + i) = *((int8_t *)src + i);  // NOLINT
   }
diff --git a/src/fpga/common/driver.h b/src/fpga/common/driver.h
index 4fa83b776e7b3df5df5e536de91093fd18ca67a1..d35627cd46b3f233255a98d1e1fbca27469f715c 100644
--- a/src/fpga/common/driver.h
+++ b/src/fpga/common/driver.h
@@ -29,7 +29,7 @@ namespace driver {
 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
-#define FPGA_REG_PHY_ADDR 0xa0000000
+#define FPGA_REG_PHY_ADDR 0x80000000
 #define FPGA_REG_SIZE 0x1000
 #define FPGA_MEM_PHY_ADDR 0x40000000
 #define FPGA_MEM_SIZE 0x80000000
diff --git a/src/fpga/common/fpga_common.h b/src/fpga/common/fpga_common.h
index 25ca99613e91dcbab4ffedf3802f2025afdc040f..60753e5cde1e39a1dbf4a1016667db748fc6b9f9 100644
--- a/src/fpga/common/fpga_common.h
+++ b/src/fpga/common/fpga_common.h
@@ -45,6 +45,7 @@ enum ActivationType {
   LEAKYRELU = 1,
   SIGMOID = 2,
   TANH = 3,
+  SOFTMAX = 4,
 };
 struct ActivationArgs {
@@ -132,7 +133,7 @@ struct DeconvTxParm {
 #endif
 struct ConvArgs {
-  bool relu_enabled;
+  // bool relu_enabled;
   void* sb_address;  // scale and bias
   void* filter_address;
   float* filter_scale_address;
@@ -198,7 +199,7 @@ struct PoolingArgs {
 };
 struct EWAddArgs {
-  bool relu_enabled;
+  // bool relu_enabled;
   uint32_t const0;  // output0 = const0 x input0 + const1 x input1;
   uint32_t const1;
   struct ImageInputArgs image0;
@@ -230,7 +231,7 @@ struct DeconvArgs {
 };
 struct DWconvArgs {
   uint32_t sub_conv_num;
-  bool relu_enabled;
+  // bool relu_enabled;
   void* bias_address;
   void* filter_address;
   struct KernelArgs kernel;
diff --git a/src/operators/activation_op.cpp b/src/operators/activation_op.cpp
index bcff87c9276721c19a970eb328fc0a183ed6c003..76c9e1a014bc0e51b032d8516ba9448fa25b2aa5 100644
--- a/src/operators/activation_op.cpp
+++ b/src/operators/activation_op.cpp
@@ -31,6 +31,10 @@ DEFINE_ACTIVATION_INFERSHAPE(Relu6);
 #ifdef SIGMOID_OP
 DEFINE_ACTIVATION_INFERSHAPE(Sigmoid);
+namespace ops = paddle_mobile::operators;
+#ifdef PADDLE_MOBILE_FPGA
+REGISTER_OPERATOR_FPGA(sigmoid, ops::SigmoidOp);
+#endif
 #endif  // SIGMOID_OP
 #ifdef TANH_OP
diff --git a/src/operators/kernel/fpga/V1/conv_add_bn_kernel.cpp b/src/operators/kernel/fpga/V1/conv_add_bn_kernel.cpp
index 30ff3155a47c813f303dc59191edd8b60e6d8ce3..3e41efdf76ed5b14d408a1278c7dba0bd1f30a1f 100644
--- a/src/operators/kernel/fpga/V1/conv_add_bn_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/conv_add_bn_kernel.cpp
@@ -22,7 +22,10 @@ namespace operators {
 template <>
 bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam<FPGA> *param) {
-  bool relu_enabled = false;
+  // bool relu_enabled = false;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::NONE;
+  int16_t leaky_relu_negative_slope = 0;
   auto input = const_cast(param->Input());
   auto bias = param->Bias();
@@ -61,10 +64,10 @@ bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam<FPGA> *param) {
   fpga::format_conv_data(filter, out, &bs_ptr, param->Groups());
   fpga::SplitConvArgs conv_arg = {0};
-  fpga::fill_split_arg(&conv_arg, input, out, filter, relu_enabled,
-                       param->Groups(), param->Strides()[0],
-                       param->Strides()[1], param->Paddings()[0],
-                       param->Paddings()[1], bs_ptr);
+  fpga::fill_split_arg(&conv_arg, input, out, filter, activation_enable,
+                       leaky_relu_negative_slope, param->Groups(),
+                       param->Strides()[0], param->Strides()[1],
+                       param->Paddings()[0], param->Paddings()[1], bs_ptr);
   param->SetFpgaArgs(conv_arg);
   return true;
diff --git a/src/operators/kernel/fpga/V1/conv_add_bn_relu_kernel.cpp b/src/operators/kernel/fpga/V1/conv_add_bn_relu_kernel.cpp
index 7f720323253fff53f7d1bb92f8bfeec77bf0da14..b7b99be78acae80c46b9d1bd1f3cb72d5f4a7cfb 100644
--- a/src/operators/kernel/fpga/V1/conv_add_bn_relu_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/conv_add_bn_relu_kernel.cpp
@@ -23,7 +23,10 @@ namespace operators {
 template <>
 bool ConvAddBNReluKernel<FPGA, float>::Init(
     FusionConvAddBNReluParam<FPGA> *param) {
-  bool relu_enabled = true;
+  // bool relu_enabled = true;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::LEAKYRELU;
+  int16_t leaky_relu_negative_slope = 0;
   auto input = const_cast(param->Input());
   auto bias = param->Bias();
   auto bias_ptr = bias->data();
@@ -64,16 +67,16 @@ bool ConvAddBNReluKernel<FPGA, float>::Init(
   if (groups == channel) {
     fpga::format_dwconv_data(filter, out, new_scale_ptr, &new_bias_ptr);
     fpga::DWconvArgs dwconv_arg = {0};
-    fpga::fill_dwconv_arg(&dwconv_arg, input, out, filter, relu_enabled,
-                          strides[0], strides[1], paddings[0], paddings[1],
-                          new_bias_ptr);
+    fpga::fill_dwconv_arg(&dwconv_arg, input, out, filter, activation_enable,
+                          leaky_relu_negative_slope, strides[0], strides[1],
+                          paddings[0], paddings[1], new_bias_ptr);
     param->SetFpgaArgs(dwconv_arg);
   } else {
     fpga::format_conv_data(filter, out, &bs_ptr, param->Groups());
     fpga::SplitConvArgs conv_arg = {0};
-    fpga::fill_split_arg(&conv_arg, input, out, filter, relu_enabled,
-                         param->Groups(), strides[0], strides[1], paddings[0],
-                         paddings[1], bs_ptr);
+    fpga::fill_split_arg(&conv_arg, input, out, filter, activation_enable,
+                         leaky_relu_negative_slope, param->Groups(), strides[0],
+                         strides[1], paddings[0], paddings[1], bs_ptr);
     param->SetFpgaArgs(conv_arg);
   }
   return true;
diff --git a/src/operators/kernel/fpga/V1/conv_add_kernel.cpp b/src/operators/kernel/fpga/V1/conv_add_kernel.cpp
old mode 100755
new mode 100644
index e566dc9b165811a3e8a9f78d040cc8c571fd93a9..153be5a4f888c2a39a7b05b9a7fbb72e305acb8d
--- a/src/operators/kernel/fpga/V1/conv_add_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/conv_add_kernel.cpp
@@ -21,7 +21,10 @@ namespace operators {
 template <>
 bool ConvAddKernel<FPGA, float>::Init(FusionConvAddParam<FPGA> *param) {
-  bool relu_enabled = false;
+  // bool relu_enabled = false;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::NONE;
+  int16_t leaky_relu_negative_slope = 0;
   auto input = const_cast(param->Input());
   const Tensor *bias = param->Bias();
   auto bias_ptr = bias->data();
@@ -40,10 +43,10 @@ bool ConvAddKernel<FPGA, float>::Init(FusionConvAddParam<FPGA> *param) {
   fpga::format_conv_data(filter, out, &bs_ptr, param->Groups());
   fpga::SplitConvArgs conv_arg = {0};
-  fpga::fill_split_arg(&conv_arg, input, out, filter, relu_enabled,
-                       param->Groups(), param->Strides()[0],
-                       param->Strides()[1], param->Paddings()[0],
-                       param->Paddings()[1], bs_ptr);
+  fpga::fill_split_arg(&conv_arg, input, out, filter, activation_enable,
+                       leaky_relu_negative_slope, param->Groups(),
+                       param->Strides()[0], param->Strides()[1],
+                       param->Paddings()[0], param->Paddings()[1], bs_ptr);
   param->SetFpgaArgs(conv_arg);
   return true;
 }
diff --git a/src/operators/kernel/fpga/V1/conv_add_relu_kernel.cpp b/src/operators/kernel/fpga/V1/conv_add_relu_kernel.cpp
old mode 100755
new mode 100644
index 6b2a2d77c0df29b4c319061776491b0583157d6f..eef35bf74b6b28e3ec0c49d6b7ace0a350f3f194
--- a/src/operators/kernel/fpga/V1/conv_add_relu_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/conv_add_relu_kernel.cpp
@@ -21,7 +21,10 @@ namespace operators {
 template <>
 bool ConvAddReluKernel<FPGA, float>::Init(FusionConvAddReluParam<FPGA> *param) {
-  bool relu_enabled = true;
+  // bool relu_enabled = true;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::LEAKYRELU;
+  int16_t leaky_relu_negative_slope = 0;
   auto input = const_cast(param->Input());
   const Tensor *bias = param->Bias();
   auto bias_ptr = bias->data();
@@ -40,10 +43,10 @@ bool ConvAddReluKernel<FPGA, float>::Init(FusionConvAddReluParam<FPGA> *param) {
   fpga::format_conv_data(filter, out, &bs_ptr, param->Groups());
   fpga::SplitConvArgs conv_arg = {0};
-  fpga::fill_split_arg(&conv_arg, input, out, filter, relu_enabled,
-                       param->Groups(), param->Strides()[0],
-                       param->Strides()[1], param->Paddings()[0],
-                       param->Paddings()[1], bs_ptr);
+  fpga::fill_split_arg(&conv_arg, input, out, filter, activation_enable,
+                       leaky_relu_negative_slope, param->Groups(),
+                       param->Strides()[0], param->Strides()[1],
+                       param->Paddings()[0], param->Paddings()[1], bs_ptr);
   param->SetFpgaArgs(conv_arg);
   return true;
 }
diff --git a/src/operators/kernel/fpga/V1/conv_bn_kernel.cpp b/src/operators/kernel/fpga/V1/conv_bn_kernel.cpp
index 492d418b9023a3c4c802da099a5da5ebf5568649..c4c2bf184d536ace31e52defb59e97c154386464 100644
--- a/src/operators/kernel/fpga/V1/conv_bn_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/conv_bn_kernel.cpp
@@ -22,7 +22,10 @@ namespace operators {
 template <>
 bool ConvBNKernel<FPGA, float>::Init(FusionConvBNParam<FPGA> *param) {
-  bool relu_enabled = false;
+  // bool relu_enabled = false;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::NONE;
+  int16_t leaky_relu_negative_slope = 0;
   auto input = const_cast(param->Input());
   auto filter = const_cast(param->Filter());
   auto out = param->Output();
@@ -53,10 +56,10 @@ bool ConvBNKernel<FPGA, float>::Init(FusionConvBNParam<FPGA> *param) {
   fpga::format_conv_data(filter, out, &bs_ptr, param->Groups());
   fpga::SplitConvArgs conv_arg = {0};
-  fpga::fill_split_arg(&conv_arg, input, out, filter, relu_enabled,
-                       param->Groups(), param->Strides()[0],
-                       param->Strides()[1], param->Paddings()[0],
-                       param->Paddings()[1], bs_ptr);
+  fpga::fill_split_arg(&conv_arg, input, out, filter, activation_enable,
+                       leaky_relu_negative_slope, param->Groups(),
+                       param->Strides()[0], param->Strides()[1],
+                       param->Paddings()[0], param->Paddings()[1], bs_ptr);
   param->SetFpgaArgs(conv_arg);
   return true;
 }
diff --git a/src/operators/kernel/fpga/V1/conv_bn_relu_kernel.cpp b/src/operators/kernel/fpga/V1/conv_bn_relu_kernel.cpp
index 337b25ffa5d3ba00cd60935f8643213cb5ea70d3..463c90d1bb0dcd48a7b41aff73b830d14f989c73 100644
--- a/src/operators/kernel/fpga/V1/conv_bn_relu_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/conv_bn_relu_kernel.cpp
@@ -22,7 +22,10 @@ namespace operators {
 template <>
 bool ConvBNReluKernel<FPGA, float>::Init(FusionConvBNReluParam<FPGA> *param) {
-  bool relu_enabled = true;
+  // bool relu_enabled = true;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::LEAKYRELU;
+  int16_t leaky_relu_negative_slope = 0;
   auto input = const_cast(param->Input());
   auto filter = const_cast(param->Filter());
   auto out = param->Output();
@@ -53,10 +56,10 @@ bool ConvBNReluKernel<FPGA, float>::Init(FusionConvBNReluParam<FPGA> *param) {
   fpga::format_conv_data(filter, out, &bs_ptr, param->Groups());
   fpga::SplitConvArgs conv_arg = {0};
-  fpga::fill_split_arg(&conv_arg, input, out, filter, relu_enabled,
-                       param->Groups(), param->Strides()[0],
-                       param->Strides()[1], param->Paddings()[0],
-                       param->Paddings()[1], bs_ptr);
+  fpga::fill_split_arg(&conv_arg, input, out, filter, activation_enable,
+                       leaky_relu_negative_slope, param->Groups(),
+                       param->Strides()[0], param->Strides()[1],
+                       param->Paddings()[0], param->Paddings()[1], bs_ptr);
   param->SetFpgaArgs(conv_arg);
   return true;
 }
diff --git a/src/operators/kernel/fpga/V1/deconv_add_kernel.cpp b/src/operators/kernel/fpga/V1/deconv_add_kernel.cpp
index 1e21d374cb3651e582f43b2875a9c302ae86cdfb..97a4d5516b52939a3a1d90a22c8050679810d405 100644
--- a/src/operators/kernel/fpga/V1/deconv_add_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/deconv_add_kernel.cpp
@@ -23,7 +23,10 @@ namespace operators {
 template <>
 bool DeconvAddKernel<FPGA, float>::Init(FusionDeconvAddParam<FPGA> *param) {
-  bool relu_enabled = false;
+  // bool relu_enabled = false;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::NONE;
+  int16_t leaky_relu_negative_slope = 0;
   auto input = const_cast(param->Input());
   const Tensor *bias = param->Bias();
   auto bias_ptr = bias->data();
@@ -53,17 +56,18 @@ bool DeconvAddKernel<FPGA, float>::Init(FusionDeconvAddParam<FPGA> *param) {
     fpga::format_DWDeconv_data(filter, out, &bs_ptr, param->Groups(),
                                sub_conv_n);
     fpga::DWDeconvArgs DWDeconv_arg = {0};
-    fpga::fill_DWDeconv_arg(&DWDeconv_arg, input, out, filter, relu_enabled,
+    fpga::fill_DWDeconv_arg(&DWDeconv_arg, input, out, filter,
+                            activation_enable, leaky_relu_negative_slope,
                             param->Strides()[0], param->Strides()[1],
                             param->Paddings()[0], param->Paddings()[1], bs_ptr);
     param->SetFpgaArgs(DWDeconv_arg);
   } else {
     fpga::format_deconv_data(filter, out, &bs_ptr, param->Groups(), sub_conv_n);
     fpga::DeconvArgs deconv_arg = {0};
-    fpga::fill_deconv_arg(&deconv_arg, input, out, filter, relu_enabled,
-                          param->Groups(), param->Strides()[0],
-                          param->Strides()[1], param->Paddings()[0],
-                          param->Paddings()[1], bs_ptr);
+    fpga::fill_deconv_arg(&deconv_arg, input, out, filter, activation_enable,
+                          leaky_relu_negative_slope, param->Groups(),
+                          param->Strides()[0], param->Strides()[1],
+                          param->Paddings()[0], param->Paddings()[1], bs_ptr);
     param->SetFpgaArgs(deconv_arg);
   }
diff --git a/src/operators/kernel/fpga/V1/deconv_add_relu_kernel.cpp b/src/operators/kernel/fpga/V1/deconv_add_relu_kernel.cpp
index ca77b2fd440fdfcfb61498205739b3ded6346ebc..f0b29943d7731d716a19cff1e3cfc904d7610c0b 100644
--- a/src/operators/kernel/fpga/V1/deconv_add_relu_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/deconv_add_relu_kernel.cpp
@@ -24,7 +24,10 @@ namespace operators {
 template <>
 bool DeconvAddReluKernel<FPGA, float>::Init(
     FusionDeconvAddReluParam<FPGA> *param) {
-  bool relu_enabled = true;
+  // bool relu_enabled = true;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::LEAKYRELU;
+  int16_t leaky_relu_negative_slope = 0;
   auto input = const_cast(param->Input());
   const Tensor *bias = param->Bias();
   auto bias_ptr = bias->data();
@@ -54,17 +57,18 @@ bool DeconvAddReluKernel<FPGA, float>::Init(
     fpga::format_DWDeconv_data(filter, out, &bs_ptr, param->Groups(),
                                sub_conv_n);
     fpga::DWDeconvArgs DWDeconv_arg = {0};
-    fpga::fill_DWDeconv_arg(&DWDeconv_arg, input, out, filter, relu_enabled,
+    fpga::fill_DWDeconv_arg(&DWDeconv_arg, input, out, filter,
+                            activation_enable, leaky_relu_negative_slope,
                             param->Strides()[0], param->Strides()[1],
                             param->Paddings()[0], param->Paddings()[1], bs_ptr);
     param->SetFpgaArgs(DWDeconv_arg);
   } else {
     fpga::format_deconv_data(filter, out, &bs_ptr, param->Groups(), sub_conv_n);
     fpga::DeconvArgs deconv_arg = {0};
-    fpga::fill_deconv_arg(&deconv_arg, input, out, filter, relu_enabled,
-                          param->Groups(), param->Strides()[0],
-                          param->Strides()[1], param->Paddings()[0],
-                          param->Paddings()[1], bs_ptr);
+    fpga::fill_deconv_arg(&deconv_arg, input, out, filter, activation_enable,
+                          leaky_relu_negative_slope, param->Groups(),
+                          param->Strides()[0], param->Strides()[1],
+                          param->Paddings()[0], param->Paddings()[1], bs_ptr);
     param->SetFpgaArgs(deconv_arg);
   }
   return true;
diff --git a/src/operators/kernel/fpga/V1/elementwise_add_kernel.cpp b/src/operators/kernel/fpga/V1/elementwise_add_kernel.cpp
index be773412f099410b02f24b1d38d2a44d6ca77689..27eee7e5ba7045473ff035f45236d04e080a692e 100644
--- a/src/operators/kernel/fpga/V1/elementwise_add_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/elementwise_add_kernel.cpp
@@ -20,7 +20,10 @@ namespace operators {
 template <>
 bool ElementwiseAddKernel<FPGA, float>::Init(ElementwiseAddParam<FPGA> *param) {
-  bool relu_enabled = false;
+  // bool relu_enabled = false;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::NONE;
+  int16_t leaky_relu_negative_slope = 0;
   auto *input_x = const_cast(param->InputX());
   auto *input_y = const_cast(param->InputY());
   auto *out = param->Out();
@@ -30,7 +33,10 @@ bool ElementwiseAddKernel<FPGA, float>::Init(ElementwiseAddParam<FPGA> *param) {
   auto out_ptr = out->mutable_data();
   fpga::EWAddArgs ewaddArgs = {0};
-  ewaddArgs.relu_enabled = relu_enabled;
+  // ewaddArgs.relu_enabled = relu_enabled;
+  ewaddArgs.output.activation.activation_type = activation_enable;
+  ewaddArgs.output.activation.leaky_relu_negative_slope =
+      leaky_relu_negative_slope;
   ewaddArgs.const0 = 0x3c00;  // =1
   ewaddArgs.const1 = 0x3c00;  // =1
   ewaddArgs.image0.address = input_x_ptr;
diff --git a/src/operators/kernel/fpga/V1/elementwise_add_relu_kernel.cpp b/src/operators/kernel/fpga/V1/elementwise_add_relu_kernel.cpp
index 541bb6126509dc7da59fa6bed5c46aff3442928b..fbbe679d4b6a6d4b0ca0a25ebb7aacf93a133943 100644
--- a/src/operators/kernel/fpga/V1/elementwise_add_relu_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/elementwise_add_relu_kernel.cpp
@@ -21,7 +21,10 @@ namespace operators {
 template <>
 bool ElementwiseAddReluKernel<FPGA, float>::Init(
     ElementwiseAddReluParam<FPGA> *param) {
-  bool relu_enabled = true;
+  // bool relu_enabled = true;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::LEAKYRELU;
+  int16_t leaky_relu_negative_slope = 0;
   auto *input_x = const_cast(param->InputX());
   auto *input_y = const_cast(param->InputY());
   auto *out = param->Out();
@@ -31,7 +34,10 @@ bool ElementwiseAddReluKernel<FPGA, float>::Init(
   auto out_ptr = out->mutable_data();
   fpga::EWAddArgs ewaddArgs = {0};
-  ewaddArgs.relu_enabled = relu_enabled;
+  // ewaddArgs.relu_enabled = relu_enabled;
+  ewaddArgs.output.activation.activation_type = activation_enable;
+  ewaddArgs.output.activation.leaky_relu_negative_slope =
+      leaky_relu_negative_slope;
   ewaddArgs.const0 = 0x3c00;  // =1
   ewaddArgs.const1 = 0x3c00;  // =1
   ewaddArgs.image0.address = input_x_ptr;
diff --git a/src/operators/kernel/fpga/V1/fetch_kernel.cpp b/src/operators/kernel/fpga/V1/fetch_kernel.cpp
index e6e4591168b90cbe19b207cd9e77eaf5cd07de80..c00bdf57a259e24669c33f011d7b77eb20d4b308 100644
--- a/src/operators/kernel/fpga/V1/fetch_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/fetch_kernel.cpp
@@ -19,12 +19,34 @@ namespace operators {
 template <>
 bool FetchKernel<FPGA, float>::Init(FetchParam<FPGA> *param) {
+  Tensor *output = param->Out();
+  // fpga::format_fp16_ofm(output);
   return true;
 }
 template <>
 void FetchKernel<FPGA, float>::Compute(const FetchParam<FPGA> &param) {
   param.Out()->ShareDataWith(*(param.InputX()));
+  /*auto input =
+      reinterpret_cast(const_cast(param.InputX()));
+  fpga::format_image(input);
+  auto input_ptr = input->data();
+  Tensor *output = param.Out();
+  auto output_ptr = output->data();
+
+  fpga::BypassArgs args = {fpga::DATA_TYPE_FP16};
+
+  args.input_data_type = fpga::DATA_TYPE_FP16;
+  args.output_data_type = fpga::DATA_TYPE_FP32;
+  args.input_layout_type = fpga::LAYOUT_CHW;
+  args.output_layout_type = fpga::LAYOUT_HWC;
+  args.image.address = reinterpret_cast(input_ptr);
+  args.image.channels = (uint32_t)input->dims()[1];
+  args.image.height = (input->dims().size() == 4) ? (uint32_t)input->dims()[2] :
+  1; args.image.width = (input->dims().size() == 4) ? (uint32_t)input->dims()[3]
+  : 1; args.image.pad_height = 0; args.image.pad_width = 0; args.output.address
+  = output_ptr; args.output.scale_address = output->scale;
+  fpga::PerformBypass(args);*/
 }
 template class FetchKernel<FPGA, float>;
diff --git a/src/operators/kernel/fpga/V1/fusion_fc_kernel.cpp b/src/operators/kernel/fpga/V1/fusion_fc_kernel.cpp
index 9258fb90e1e6bf9a597a387843ce781858628139..fadeae324ff8f5160bc5ff410c2e02b09539a01e 100644
--- a/src/operators/kernel/fpga/V1/fusion_fc_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/fusion_fc_kernel.cpp
@@ -20,7 +20,10 @@ namespace operators {
 template <>
 bool FusionFcKernel<FPGA, float>::Init(FusionFcParam<FPGA> *param) {
-  bool relu_enabled = false;
+  // bool relu_enabled = false;
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::NONE;
+  int16_t leaky_relu_negative_slope = 0;
   auto input_x = const_cast(param->InputX());
   auto filter = const_cast(param->InputY());
   const Tensor *input_z = param->InputZ();
@@ -55,8 +58,8 @@ bool FusionFcKernel<FPGA, float>::Init(FusionFcParam<FPGA> *param) {
   fpga::format_fp16_ofm(out);
   fpga::SplitConvArgs conv_arg = {0};
-  fpga::fill_split_arg(&conv_arg, input_x, out, filter, relu_enabled, 1, 1, 1,
-                       0, 0, bs_ptr);
+  fpga::fill_split_arg(&conv_arg, input_x, out, filter, activation_enable,
+                       leaky_relu_negative_slope, 1, 1, 1, 0, 0, bs_ptr);
   param->SetFpgaArgs(conv_arg);
   return true;
 }
diff --git a/src/operators/kernel/fpga/V1/reshape_kernel.cpp b/src/operators/kernel/fpga/V1/reshape_kernel.cpp
index f5495e6d005f7f7c14ebd3d290ea9be02b9f0951..5e01bb74bab6996ca59632ae31f37ecfeafc918c 100644
--- a/src/operators/kernel/fpga/V1/reshape_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/reshape_kernel.cpp
@@ -22,6 +22,12 @@ namespace operators {
 template <>
 bool ReshapeKernel<FPGA, float>::Init(ReshapeParam<FPGA> *param) {
   param->Out()->ShareDataWith(*param->InputX());
+  const int in_n = param->InputX()->dims()[0];
+  const int in_c = param->InputX()->dims()[1];
+  const int in_h = param->InputX()->dims()[2];
+  const int in_w = param->InputX()->dims()[3];
+  auto out = param->Out();
+  out->Resize(framework::make_ddim({in_n, in_c * in_h * in_w}));
   return true;
 }
diff --git a/src/operators/kernel/fpga/V1/sigmoid_kernel.cpp b/src/operators/kernel/fpga/V1/sigmoid_kernel.cpp
index 276a8fef62edfabfabb116fada145eedbf23ffa3..6c836e2776891f283677287eae54019f0dbef39b 100644
--- a/src/operators/kernel/fpga/V1/sigmoid_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/sigmoid_kernel.cpp
@@ -15,73 +15,41 @@ limitations under the License. */
 #ifdef SIGMOID_OP
 #include "operators/kernel/activation_kernel.h"
+
 namespace paddle_mobile {
 namespace operators {
-using framework::DDim;
-using framework::Tensor;
-
 template <>
 bool SigmoidKernel<FPGA, float>::Init(SigmoidParam<FPGA> *param) {
+  paddle_mobile::fpga::ActivationType activation_enable =
+      paddle_mobile::fpga::SIGMOID;
+  int16_t leaky_relu_negative_slope = 0;
   auto input = const_cast(param->InputX());
   auto input_ptr = input->data();
   auto out = param->Out();
-  fpga::format_fp32_ofm(out);
+  fpga::format_fp16_ofm(out);
-  auto float_input = new Tensor;
-  if (input->dims().size() == 2) {
-    float_input->mutable_data({1, input->dims()[1]});
-  } else if (input->dims().size() == 4) {
-    float_input->mutable_data(
-        {1, input->dims()[2], input->dims()[3], input->dims()[1]});
-  } else {
-    DLOG << "wrong dimension of softmax input";
-  }
-
-  fpga::format_fp32_ofm(float_input);
   fpga::BypassArgs args = {fpga::DATA_TYPE_FP16};
-  args.input_layout_type = fpga::LAYOUT_HWC;
-  args.output_layout_type = fpga::LAYOUT_CHW;
   args.input_data_type = fpga::DATA_TYPE_FP16;
-  args.output_data_type = fpga::DATA_TYPE_FP32;
+  args.output_data_type = fpga::DATA_TYPE_FP16;
   args.image.address = input_ptr;
   args.image.height =
       (input->dims().size() == 4) ? (uint32_t)input->dims()[2] : 1;
   args.image.width =
       (input->dims().size() == 4) ? (uint32_t)input->dims()[3] : 1;
   args.image.channels = (uint32_t)input->dims()[1];
-  args.output.address = float_input->data();
-  args.output.scale_address = float_input->scale;
-  param->SetFloatInput(float_input);
+  args.output.address = out->data();
+  args.output.scale_address = out->scale;
+  args.output.activation.activation_type = activation_enable;
+  args.output.activation.leaky_relu_negative_slope = leaky_relu_negative_slope;
   param->SetFpgaArgs(args);
-
   return true;
 }
-template <typename T>
-T Sigmoid(const T a) {
-  T tmp = -1.0f * a;
-  return (1.0 / (1.0 + exp(tmp)));
-}
-template <typename T>
-void sigmoidFuntor(Tensor *input, Tensor *output) {
-  auto *input_ptr = input->data();
-  auto *output_ptr = output->mutable_data();
-  for (int i = 0; i < input->numel(); i++) {
-    *(output_ptr + i) = Sigmoid(*(input_ptr + i));
-  }
-}
 template <>
 void SigmoidKernel<FPGA, float>::Compute(const SigmoidParam<FPGA> &param) {
-  Tensor *in_x = param.FloatInput();
-  Tensor *out = param.Out();
-
   fpga::PerformBypass(param.FpgaArgs());
-  fpga::fpga_invalidate((void *)in_x->data(),  // NOLINT
-                        in_x->numel() * sizeof(float));
-  // TODO: In general case, 0 should be squeezed before softmax input // NOLINT
-  sigmoidFuntor(in_x, out);
-  fpga::fpga_flush(out->data(), out->memory_size());
 }
+
 }  // namespace operators
 }  // namespace paddle_mobile
diff --git a/src/operators/kernel/fpga/V1/softmax_kernel.cpp b/src/operators/kernel/fpga/V1/softmax_kernel.cpp
index e5ada795b120c1438688089078be20e03f078cbb..2698fdece49409aec017112e8613a706c248cf48 100644
--- a/src/operators/kernel/fpga/V1/softmax_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/softmax_kernel.cpp
@@ -26,7 +26,6 @@ bool SoftmaxKernel<FPGA, float>::Init(SoftmaxParam<FPGA> *param) {
   auto input_ptr = input->data();
   auto out = param->Out();
   fpga::format_fp32_ofm(out);
-
   auto float_input = new Tensor;
   if (input->dims().size() == 2) {
     float_input->mutable_data({1, input->dims()[1]});
@@ -36,7 +35,6 @@ bool SoftmaxKernel<FPGA, float>::Init(SoftmaxParam<FPGA> *param) {
   } else {
     DLOG << "wrong dimension of softmax input";
   }
-
   fpga::format_fp32_ofm(float_input);
   fpga::BypassArgs args = {fpga::DATA_TYPE_FP16};
   args.input_layout_type = fpga::LAYOUT_HWC;
@@ -53,6 +51,7 @@ bool SoftmaxKernel<FPGA, float>::Init(SoftmaxParam<FPGA> *param) {
   args.output.scale_address = float_input->scale;
   param->SetFloatInput(float_input);
   param->SetFpgaArgs(args);
+
   return true;
 }
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index d90dff2d7e919f736b5cfd0531074944938f2a8a..e3da6724a8c33501f50bc463ee25a88166f4351b 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -1081,14 +1081,9 @@ class SigmoidParam : public OpParam {
 #ifdef PADDLE_MOBILE_FPGA

  private:
-  std::shared_ptr<Tensor> float_input_x_;
   fpga::BypassArgs fpga_bypass_args;

  public:
-  RType *FloatInput() const {
-    return float_input_x_ == nullptr ? input_x_ : float_input_x_.get();
-  }
-  void SetFloatInput(Tensor *input) { float_input_x_.reset(input); }
   const fpga::BypassArgs &FpgaArgs() const { return fpga_bypass_args; }
   void SetFpgaArgs(const fpga::BypassArgs &args) { fpga_bypass_args = args; }
 #endif
@@ -1214,6 +1209,20 @@ class FetchParam : public OpParam {
  private:
   RType *input_x_;
   Tensor *out_;
+#ifdef PADDLE_MOBILE_FPGA
+
+ private:
+  std::shared_ptr<Tensor> float_input_x_;
+  fpga::BypassArgs fpga_bypass_args;
+
+ public:
+  RType *FloatInput() const {
+    return float_input_x_ == nullptr ? input_x_ : float_input_x_.get();
+  }
+  void SetFloatInput(Tensor *input) { float_input_x_.reset(input); }
+  const fpga::BypassArgs &FpgaArgs() const { return fpga_bypass_args; }
+  void SetFpgaArgs(const fpga::BypassArgs &args) { fpga_bypass_args = args; }
+#endif
 };
 #ifdef FILL_CONSTANT_OP
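
Note: with the sigmoid kernel reduced to a single PerformBypass call, the host no longer computes any activation itself. For validating hardware output against a reference, these are the scalar functions the ActivationType modes correspond to (a best-effort CPU model; the slope argument is assumed to be the decoded float value of the int16_t register field, which this patch always sets to 0):

    #include <cmath>

    // Reference model of the activations requested via ActivationArgs.
    // SOFTMAX is omitted: it is not a per-element function.
    float reference_activation(float x, int mode, float negative_slope) {
      switch (mode) {
        case 1:  return x >= 0.0f ? x : negative_slope * x;  // LEAKYRELU
        case 2:  return 1.0f / (1.0f + std::exp(-x));        // SIGMOID
        case 3:  return std::tanh(x);                        // TANH
        default: return x;                                   // NONE
      }
    }

The removed sigmoidFuntor loop was exactly the mode-2 case applied element-wise on the CPU; it now runs on the PE via the bypass path instead.
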