Commit 96461030 authored by qnqinan, committed by GitHub

Merge pull request #909 from zhangyang0701/develop

Update to format data for FPGA track; closes #908
@@ -174,30 +174,43 @@ void format_ofm(framework::Tensor *ofm_tensor) {
   ofm_tensor->reset_data_ptr(fpga_malloc(memory_size));
 }
-void format_filter(framework::Tensor *filter_tensor, int group_num) {
+float filter_find_max(framework::Tensor *filter_tensor) {
+  auto filter_ptr = filter_tensor->data<float>();
+  return filter::find_max(filter_ptr, filter_tensor->numel());
+}
+int get_element_num_per_div(framework::Tensor *filter_tensor, int group_num) {
+  auto dims = filter_tensor->dims();
+  PADDLE_MOBILE_ENFORCE(dims.size() == 4 || dims.size() == 2,
+                        "Filter order should be 4 or 2");
+  int chw = dims.size() == 4 ? dims[1] * dims[2] * dims[3] : dims[1];
+  int num = dims.size() == 4 ? dims[0] : dims[1];
+  int div_capacity = filter::calc_division_capacity(chw);
+  return filter::calc_num_per_div(num, group_num, div_capacity);
+}
+void format_filter(framework::Tensor *filter_tensor, float max_value,
+                   int group_num) {
   auto dims = filter_tensor->dims();
   int num = dims[0], channel = dims[1], height = dims[2], width = dims[3];
   auto data_ptr = filter_tensor->mutable_data<float>();
   size_t memory_size = num * channel * height * width * sizeof(float);
   float *new_data = (float *)fpga_malloc(memory_size);
   fpga_copy(new_data, data_ptr, memory_size);
-  float max_value = filter::find_max(new_data, num * channel * height * width);
   filter::format_filter(&new_data, num, channel, height, width, group_num,
                         max_value);
   filter_tensor->reset_data_ptr(new_data);
 }
-void format_fc_matrix(framework::Tensor *filter_tensor, int group_num,
-                      int height, int width) {
+void format_fc_matrix(framework::Tensor *filter_tensor, float max_value,
+                      int group_num, int height, int width) {
   auto dims = filter_tensor->dims();
-  PADDLE_MOBILE_ENFORCE(dims[0] % (height * width) == 0,
-                        "Filter number should be divisible by group number");
+  PADDLE_MOBILE_ENFORCE(height == 1 && width == 1,
+                        "IFM should be flattened for FC");
   int num = dims[1], channel = dims[0] / height / width;
   auto data_ptr = filter_tensor->mutable_data<float>();
   size_t memory_size = num * channel * height * width * sizeof(float);
   float *new_data = (float *)fpga_malloc(memory_size);
   fpga_copy(new_data, data_ptr, memory_size);
-  float max_value = filter::find_max(new_data, num * channel * height * width);
   filter::format_filter(&new_data, num, channel, height, width, group_num,
                         max_value);
   filter_tensor->reset_data_ptr(new_data);
...
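Taken together, these hunks replace the old one-shot format_filter with explicit steps, so each kernel can format its bias/scale array to match how the filter was divided. A minimal sketch of the intended call order, assuming a conv-style kernel with `filter`, `bs_ptr`, `channel`, and `param->Groups()` in scope (it mirrors the kernel hunks further down):

    float max_value = fpga::filter_find_max(filter);          // scan the fp32 weights for the quantization range
    fpga::format_filter(filter, max_value, param->Groups());  // quantize and lay the filter out for the FPGA
    int element_num_per_div =
        fpga::get_element_num_per_div(filter, param->Groups());
    fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);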
@@ -54,12 +54,6 @@ struct MemoryCopyArgs {
   size_t size;
 };
-struct BNArgs {
-  bool enabled;
-  void* bias_address;
-  void* scale_address;
-};
-
 /**
 Conv and Pooling kernel
 */
@@ -178,9 +172,12 @@ int ComputeFpgaEWAdd(const struct EWAddArgs& args);
 static inline int align_to_x(int num, int x) { return (num + x - 1) / x * x; }
 void format_image(framework::Tensor* image_tensor);
 void format_ofm(framework::Tensor* ofm_tensor);  // only allocate memory
-void format_filter(framework::Tensor* filter_tensor, int group_num);
-void format_fc_matrix(framework::Tensor* filter_tensor, int group_num,
-                      int height = 1, int width = 1);
+float filter_find_max(framework::Tensor* filter_tensor);
+int get_element_num_per_div(framework::Tensor* filter_tensor, int group_num);
+void format_filter(framework::Tensor* filter_tensor, float max_value,
+                   int group_num);
+void format_fc_matrix(framework::Tensor* filter_tensor, float max_value,
+                      int group_num, int height = 1, int width = 1);
 void format_bias_scale_array(float** bias_scale_array,
                              int element_num_per_division, int num);
...
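For reference, align_to_x rounds num up to the next multiple of x, which the format_* helpers presumably use to pad sizes to hardware-friendly boundaries. A self-contained check with illustrative values:

    #include <cassert>

    static inline int align_to_x(int num, int x) { return (num + x - 1) / x * x; }

    int main() {
      assert(align_to_x(37, 16) == 48);  // rounded up to the next multiple of 16
      assert(align_to_x(32, 16) == 32);  // already aligned values pass through
      assert(align_to_x(1, 16) == 16);
      return 0;
    }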
@@ -35,6 +35,11 @@ int calc_division_number(int num, int group_num, int division_capacity) {
 }
 int calc_num_per_div(int num, int group_num, int division_capacity) {
+  PADDLE_MOBILE_ENFORCE(num % group_num == 0,
+                        "Filter number should be divisible by group number");
+  int split_num = calc_split_num(num, division_capacity);
+  PADDLE_MOBILE_ENFORCE(group_num == 1 || split_num == 1,
+                        "Split number or group number should be 1");
   if (group_num == 1) {
     if (num > division_capacity) {
       return division_capacity;
...
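The two new checks restrict calc_num_per_div to two regimes: an ungrouped filter set that may be split across divisions, or a grouped set that must not be split. The rest of the function is truncated in this view; a hedged sketch of a body consistent with the visible lines (the last two returns are assumptions, not part of the diff):

    if (group_num == 1) {
      if (num > division_capacity) {
        return division_capacity;  // shown above: a full division holds division_capacity filters
      }
      return num;                  // assumption: everything fits into a single division
    }
    return num / group_num;        // assumption: grouped case, guaranteed unsplit by the ENFORCE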
@@ -20,6 +20,11 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace fpga {
 namespace filter {
+int calc_division_capacity(int chw);
+int calc_split_num(int num, int division_capacity);
+int calc_division_number(int num, int group_num, int division_capacity);
+int calc_num_per_div(int num, int group_num, int division_capacity);
+
 void convert_to_hwc(float** data_in, int num, int channel, int height,
                     int width);
 float find_max(float* data_in, int data_size);
...
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "fpga/quantization.h"
#include <algorithm>
#include <cmath>    // std::pow
#include <cstring>  // std::memcpy

namespace paddle_mobile {
namespace fpga {

template <typename Dtype>
static void chw_to_hwc(Dtype* data_in, Dtype* data_out, int64_t num,
                       int64_t channel, int64_t height, int64_t width) {
  for (int n = 0; n < num; n++) {
    int64_t amount_per_row = width * channel;
    for (int c = 0; c < channel; c++) {
      for (int h = 0; h < height; h++) {
        int64_t offset_height = h * amount_per_row;
        for (int w = 0; w < width; w++) {
          *(data_out + offset_height + w * channel + c) = *(data_in++);
        }
      }
    }
    data_out += channel * height * width;  // advance to the next image in HWC layout
  }
}

template <typename Dtype>
static Dtype find_max(Dtype* data, int64_t num) {
  Dtype max = 0;
  for (int i = 0; i < num; ++i) {
    Dtype value = data[i];
    Dtype abs = value > 0 ? value : -value;
    max = std::max(max, abs);
  }
  return max;
}

void quantize_filter(framework::Tensor* filter) {
  DLOG << "quantilize_filter........" << filter->dims();
  float scale = 0;
  auto fix_range = static_cast<float>(std::pow(2, 8 - 1) - 1);  // 127 for int8

  auto* tmp_data = new int8_t[filter->numel()];

  // 32bit filter -> 8bit filter
  if (filter->type() == typeid(float)) {
    auto* float_data = filter->data<float>();
    auto max = find_max<float>(float_data, filter->numel());

    scale = (fix_range / max);
    DLOG << "scale:" << scale;

    for (int i = 0; i < filter->numel(); ++i) {
      tmp_data[i] = (int8_t)(float_data[i] * scale);
    }
  } else {
    // already int8: keep the data as-is, only recompute the scale
    auto max = find_max<int8_t>(filter->data<int8_t>(), filter->numel());
    scale = (fix_range / max);
    std::memcpy(tmp_data, filter->data<int8_t>(), (size_t)filter->numel());
  }

  if (filter->dims().size() == 4) {
    const auto batch_size = filter->dims()[0];
    const auto channel = filter->dims()[1];
    const auto height = filter->dims()[2];
    const auto width = filter->dims()[3];
    chw_to_hwc<int8_t>(tmp_data, filter->mutable_data<int8_t>(), batch_size,
                       channel, height, width);
  } else if (filter->dims().size() == 2) {
    std::memcpy(filter->mutable_data<int8_t>(), tmp_data,
                (size_t)filter->numel());
  }

  delete[] tmp_data;
  filter->SetFpgaScale(scale);
}

}  // namespace fpga
}  // namespace paddle_mobile
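For reference, the per-batch index mapping chw_to_hwc implements is out[h][w][c] = in[c][h][w]. A standalone check of that mapping, independent of the tensor machinery above:

    #include <cassert>
    #include <vector>

    int main() {
      const int C = 2, H = 2, W = 3;
      std::vector<int> chw(C * H * W), hwc(C * H * W);
      for (int i = 0; i < C * H * W; ++i) chw[i] = i;  // fill CHW with its own flat indices
      for (int c = 0; c < C; ++c)
        for (int h = 0; h < H; ++h)
          for (int w = 0; w < W; ++w)
            hwc[(h * W + w) * C + c] = chw[(c * H + h) * W + w];
      // element (c=1, h=0, w=2): CHW index (1*2+0)*3+2 = 8 lands at HWC index (0*3+2)*2+1 = 5
      assert(hwc[5] == chw[8]);
      return 0;
    }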
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "common/types.h"
#include "framework/lod_tensor.h"
#include "framework/tensor.h"

namespace paddle_mobile {
namespace fpga {

template <typename Dtype>
static void chw_to_hwc(Dtype* data_in, Dtype* data_out, int64_t num,
                       int64_t channel, int64_t height, int64_t width);

void quantize_filter(framework::Tensor* filter);

}  // namespace fpga
}  // namespace paddle_mobile
@@ -328,28 +328,7 @@ class Tensor {
   inline void reset_data_ptr(void *p) {
     ((PlaceholderImpl *)(holder_.get()))->ptr_.reset((uint8_t *)p);
   }
+  float scale[2];  // scale[0] = MAX/127.0, scale[1] = 127.0/MAX
-  struct FPGAArgs {
-    friend class Tensor;
-
-    inline float *scale_pointer() { return scale_; }
-    inline float scale() { return *scale_; }
-
-   private:
-    float *scale_;
-  };
-
-  struct FPGAArgs fpga_args() const {
-    FPGAArgs args;
-    args.scale_ = scale.get();
-    return args;
-  }
-
-  void SetFpgaScale(float s) { *(scale.get()) = s; }
-
- private:
-  std::shared_ptr<float> scale = std::make_shared<float>(0);
 #endif
 };
...
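With the FPGAArgs accessor gone, callers read and write the two-element scale array directly. The kernel hunks below all follow the same pattern:

    convArgs.image.scale_address = input->scale;  // float[2]: {MAX/127.0, 127.0/MAX}
    convArgs.output.scale_address = out->scale;   // presumably written back when the op runs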
@@ -41,18 +41,19 @@ class FeedOp : public framework::OperatorBase<DeviceType> {
   void Init() {
     Tensor *output = param_.Out();
-    output->mutable_data<half>();
+    fpga::format_ofm(output);
   }

   void RunImpl() const {
-    const Tensor *input = param_.InputX();
+    Tensor *input = const_cast<Tensor *>(param_.InputX());
     auto input_ptr = input->data<float>();
+    fpga::format_image(input);
     Tensor *output = param_.Out();
     auto output_ptr = output->mutable_data<half>();
-    auto output_scale_address = output->fpga_args().scale_pointer();

     fpga::BypassArgs args;
     args.convert_type = fpga::DATA_FP32_TO_FP16;
-    args.layout_type = fpga::LAYOUT_CHW_TO_HWC;
+    args.layout_type = fpga::LAYOUT_NO_CONVERT;
     args.image.address = (void *)input_ptr;
     args.image.channels = input->dims()[1];
     args.image.height = input->dims()[2];
@@ -60,7 +61,7 @@ class FeedOp : public framework::OperatorBase<DeviceType> {
     args.image.pad_height = 0;
     args.image.pad_width = 0;
     args.output.address = output_ptr;
-    args.output.scale_address = output_scale_address;
+    args.output.scale_address = output->scale;
     fpga::PerformBypass(args);
   }
...
@@ -16,7 +16,6 @@ limitations under the License. */

 #include "operators/kernel/conv_add_bn_kernel.h"
 #include "fpga/api.h"
-#include "fpga/quantization.h"

 namespace paddle_mobile {
 namespace operators {
@@ -24,14 +23,14 @@ namespace operators {
 template <>
 bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam<FPGA> *param) {
   bool relu_enabled = false;
-  const Tensor *input = param->Input();
+  Tensor *input = const_cast<Tensor *>(param->Input());
   auto input_ptr = input->data<half>();
   const Tensor *bias = param->Bias();
   auto bias_ptr = bias->data<float>();
   Tensor *filter = param->Filter();
   Tensor *out = param->Output();
-  auto out_ptr = out->mutable_data<half>();
   auto bn_mean_ptr = param->InputMean()->data<float>();
   auto bn_var_ptr = param->InputVariance()->data<float>();
   auto bn_scale_ptr = param->InputScale()->data<float>();
@@ -54,15 +53,23 @@ bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam<FPGA> *param) {
         static_cast<float>(pow((bn_var_ptr[i] + epsilon), 0.5));
     new_bias_ptr[i] =
         bn_bias_ptr[i] + (bias_ptr[i] - bn_mean_ptr[i]) * new_scale_ptr[i];
-    bs_ptr[i * 2] = new_scale_ptr[i];
-    bs_ptr[i * 2 + 1] = new_bias_ptr[i];
+    bs_ptr[i + channel] = new_scale_ptr[i];
+    bs_ptr[i] = new_bias_ptr[i];
   }
   param->SetNewScale(new_scale);
   param->SetNewBias(new_bias);
-  fpga::quantize_filter(filter);
+
+  float max_value = fpga::filter_find_max(filter);
+  fpga::format_filter(filter, max_value, param->Groups());
   auto filter_ptr = filter->data<int8_t>();
+
+  int element_num_per_div =
+      fpga::get_element_num_per_div(filter, param->Groups());
+  fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
+
+  fpga::format_ofm(out);
+  auto out_ptr = out->mutable_data<half>();
+
   fpga::ConvArgs convArgs;
   convArgs.relu_enabled = relu_enabled;
   convArgs.filter_address = (void *)filter_ptr;
@@ -79,9 +86,9 @@ bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam<FPGA> *param) {
   convArgs.image.width = input->dims()[3];
   convArgs.image.pad_height = param->Paddings()[0];
   convArgs.image.pad_width = param->Paddings()[1];
-  convArgs.image.scale_address = input->fpga_args().scale_pointer();
+  convArgs.image.scale_address = input->scale;
   convArgs.output.address = (void *)out_ptr;
-  convArgs.output.scale_address = out->fpga_args().scale_pointer();
+  convArgs.output.scale_address = out->scale;
   param->SetFpgaArgs(convArgs);
   return true;
...
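Note the bias/scale layout change inside the loop above, repeated in the kernel hunks that follow: the old code interleaved scale and bias per channel, while the new code stores all biases first and all scales second, which is the shape format_bias_scale_array now expects. Schematically, for `channel` == C output channels:

    // old, interleaved:           {s0, b0, s1, b1, ..., s(C-1), b(C-1)}
    //   bs_ptr[i * 2] = scale[i];     bs_ptr[i * 2 + 1] = bias[i];
    // new, two contiguous planes: {b0, ..., b(C-1), s0, ..., s(C-1)}
    //   bs_ptr[i] = bias[i];          bs_ptr[i + channel] = scale[i];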
@@ -15,7 +15,6 @@ limitations under the License. */
 #ifdef FUSION_CONVADDBNRELU_OP

 #include "operators/kernel/conv_add_bn_relu_kernel.h"
-#include "fpga/quantization.h"

 namespace paddle_mobile {
 namespace operators {
@@ -24,13 +23,12 @@ template <>
 bool ConvAddBNReluKernel<FPGA, float>::Init(
     FusionConvAddBNReluParam<FPGA> *param) {
   bool relu_enabled = true;
-  const Tensor *input = param->Input();
+  Tensor *input = const_cast<Tensor *>(param->Input());
   auto input_ptr = input->data<half>();
   const Tensor *bias = param->Bias();
   auto bias_ptr = bias->data<float>();
   Tensor *filter = param->Filter();
   Tensor *out = param->Output();
-  auto out_ptr = out->mutable_data<half>();
   auto bn_mean_ptr = param->InputMean()->data<float>();
   auto bn_var_ptr = param->InputVariance()->data<float>();
   auto bn_scale_ptr = param->InputScale()->data<float>();
@@ -52,14 +50,23 @@ bool ConvAddBNReluKernel<FPGA, float>::Init(
         static_cast<float>(pow((bn_var_ptr[i] + epsilon), 0.5));
     new_bias_ptr[i] =
         bn_bias_ptr[i] + (bias_ptr[i] - bn_mean_ptr[i]) * new_scale_ptr[i];
-    bs_ptr[i * 2] = new_scale_ptr[i];
-    bs_ptr[i * 2 + 1] = new_bias_ptr[i];
+    bs_ptr[i + 2] = new_scale_ptr[i];
+    bs_ptr[i] = new_bias_ptr[i];
   }
   param->SetNewScale(new_scale);
   param->SetNewBias(new_bias);
-  fpga::quantize_filter(filter);
+
+  float max_value = fpga::filter_find_max(filter);
+  fpga::format_filter(filter, max_value, param->Groups());
   auto filter_ptr = filter->data<int8_t>();
+
+  int element_num_per_div =
+      fpga::get_element_num_per_div(filter, param->Groups());
+  fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
+
+  fpga::format_ofm(out);
+  auto out_ptr = out->mutable_data<half>();
+
   fpga::ConvArgs convArgs;
   convArgs.relu_enabled = relu_enabled;
   convArgs.filter_address = (void *)filter_ptr;
@@ -76,9 +83,9 @@ bool ConvAddBNReluKernel<FPGA, float>::Init(
   convArgs.image.width = input->dims()[3];
   convArgs.image.pad_height = param->Paddings()[0];
   convArgs.image.pad_width = param->Paddings()[1];
-  convArgs.image.scale_address = input->fpga_args().scale_pointer();
+  convArgs.image.scale_address = input->scale;
   convArgs.output.address = (void *)out_ptr;
-  convArgs.output.scale_address = out->fpga_args().scale_pointer();
+  convArgs.output.scale_address = out->scale;
   param->SetFpgaArgs(convArgs);
   return true;
 }
...
@@ -15,7 +15,6 @@ limitations under the License. */
 #ifdef FUSION_CONVADDRELU_OP

 #include "operators/kernel/conv_add_relu_kernel.h"
-#include "fpga/quantization.h"

 namespace paddle_mobile {
 namespace operators {
@@ -23,26 +22,33 @@ namespace operators {
 template <>
 bool ConvAddReluKernel<FPGA, float>::Init(FusionConvAddReluParam<FPGA> *param) {
   bool relu_enabled = true;
-  const Tensor *input = param->Input();
+  Tensor *input = const_cast<Tensor *>(param->Input());
   auto input_ptr = input->data<half>();
   const Tensor *bias = param->Bias();
   auto bias_ptr = bias->data<float>();
   Tensor *filter = param->Filter();
   Tensor *out = param->Output();
-  auto out_ptr = out->mutable_data<half>();

   PADDLE_MOBILE_ENFORCE(out->dims()[1] == bias->dims()[0],
                         "Output channel should be equal to bias number");
   int channel = out->dims()[1];
   float *bs_ptr = (float *)fpga::fpga_malloc(2 * channel * sizeof(float));
   for (int i = 0; i < channel; i++) {
-    bs_ptr[i * 2] = 1;
-    bs_ptr[i * 2 + 1] = bias_ptr[i];
+    bs_ptr[i + channel] = 1;
+    bs_ptr[i] = bias_ptr[i];
   }

-  fpga::quantize_filter(filter);
+  float max_value = fpga::filter_find_max(filter);
+  fpga::format_filter(filter, max_value, param->Groups());
   auto filter_ptr = filter->data<int8_t>();
+
+  int element_num_per_div =
+      fpga::get_element_num_per_div(filter, param->Groups());
+  fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
+
+  fpga::format_ofm(out);
+  auto out_ptr = out->mutable_data<half>();
+
   fpga::ConvArgs convArgs;
   convArgs.relu_enabled = relu_enabled;
   convArgs.filter_address = (void *)filter_ptr;
@@ -60,9 +66,9 @@ bool ConvAddReluKernel<FPGA, float>::Init(FusionConvAddReluParam<FPGA> *param) {
   convArgs.image.pad_height = param->Paddings()[0];
   convArgs.image.pad_width = param->Paddings()[1];
-  convArgs.image.scale_address = input->fpga_args().scale_pointer();
+  convArgs.image.scale_address = input->scale;
   convArgs.output.address = (void *)out_ptr;
-  convArgs.output.scale_address = out->fpga_args().scale_pointer();
+  convArgs.output.scale_address = out->scale;
   param->SetFpgaArgs(convArgs);
   return true;
 }
...
@@ -16,7 +16,6 @@ limitations under the License. */

 #include "operators/kernel/conv_bn_kernel.h"
 #include "fpga/api.h"
-#include "fpga/quantization.h"

 namespace paddle_mobile {
 namespace operators {
@@ -24,12 +23,11 @@ namespace operators {
 template <>
 bool ConvBNKernel<FPGA, float>::Init(FusionConvBNParam<FPGA> *param) {
   bool relu_enabled = false;
-  const Tensor *input = param->Input();
+  Tensor *input = const_cast<Tensor *>(param->Input());
   auto input_ptr = input->data<half>();
   Tensor *filter = param->Filter();
   Tensor *out = param->Output();
-  auto out_ptr = out->mutable_data<half>();
   auto bn_mean_ptr = param->InputMean()->data<float>();
   auto bn_var_ptr = param->InputVariance()->data<float>();
   auto bn_scale_ptr = param->InputScale()->data<float>();
@@ -50,14 +48,23 @@ bool ConvBNKernel<FPGA, float>::Init(FusionConvBNParam<FPGA> *param) {
     new_scale_ptr[i] = bn_scale_ptr[i] /
                        static_cast<float>(pow((bn_var_ptr[i] + epsilon), 0.5));
     new_bias_ptr[i] = bn_bias_ptr[i] + (0 - bn_mean_ptr[i]) * new_scale_ptr[i];
-    bs_ptr[i * 2] = new_scale_ptr[i];
-    bs_ptr[i * 2 + 1] = new_bias_ptr[i];
+    bs_ptr[i + channel] = new_scale_ptr[i];
+    bs_ptr[i] = new_bias_ptr[i];
   }
   param->SetNewScale(new_scale);
   param->SetNewBias(new_bias);
-  fpga::quantize_filter(filter);
+
+  float max_value = fpga::filter_find_max(filter);
+  fpga::format_filter(filter, max_value, param->Groups());
   auto filter_ptr = filter->data<int8_t>();
+
+  int element_num_per_div =
+      fpga::get_element_num_per_div(filter, param->Groups());
+  fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
+
+  fpga::format_ofm(out);
+  auto out_ptr = out->mutable_data<half>();
+
   fpga::ConvArgs convArgs;
   convArgs.relu_enabled = relu_enabled;
   convArgs.filter_address = (void *)filter_ptr;
@@ -74,9 +81,9 @@ bool ConvBNKernel<FPGA, float>::Init(FusionConvBNParam<FPGA> *param) {
   convArgs.image.width = input->dims()[3];
   convArgs.image.pad_height = param->Paddings()[0];
   convArgs.image.pad_width = param->Paddings()[1];
-  convArgs.image.scale_address = input->fpga_args().scale_pointer();
+  convArgs.image.scale_address = input->scale;
   convArgs.output.address = (void *)out_ptr;
-  convArgs.output.scale_address = out->fpga_args().scale_pointer();
+  convArgs.output.scale_address = out->scale;
   param->SetFpgaArgs(convArgs);
   return true;
...
@@ -15,7 +15,6 @@ limitations under the License. */
 #ifdef FUSION_CONVBNRELU_OP

 #include "operators/kernel/conv_bn_relu_kernel.h"
-#include "fpga/quantization.h"

 namespace paddle_mobile {
 namespace operators {
@@ -23,11 +22,10 @@ namespace operators {
 template <>
 bool ConvBNReluKernel<FPGA, float>::Init(FusionConvBNReluParam<FPGA> *param) {
   bool relu_enabled = true;
-  const Tensor *input = param->Input();
+  Tensor *input = const_cast<Tensor *>(param->Input());
   auto input_ptr = input->data<half>();
   Tensor *filter = param->Filter();
   Tensor *out = param->Output();
-  auto out_ptr = out->mutable_data<half>();
   auto bn_mean_ptr = param->InputMean()->data<float>();
   auto bn_var_ptr = param->InputVariance()->data<float>();
   auto bn_scale_ptr = param->InputScale()->data<float>();
@@ -47,14 +45,23 @@ bool ConvBNReluKernel<FPGA, float>::Init(FusionConvBNReluParam<FPGA> *param) {
     new_scale_ptr[i] = bn_scale_ptr[i] /
                        static_cast<float>(pow((bn_var_ptr[i] + epsilon), 0.5));
     new_bias_ptr[i] = bn_bias_ptr[i] + (0 - bn_mean_ptr[i]) * new_scale_ptr[i];
-    bs_ptr[i * 2] = new_scale_ptr[i];
-    bs_ptr[i * 2 + 1] = new_bias_ptr[i];
+    bs_ptr[i + channel] = new_scale_ptr[i];
+    bs_ptr[i] = new_bias_ptr[i];
   }
   param->SetNewScale(new_scale);
   param->SetNewBias(new_bias);
-  fpga::quantize_filter(filter);
+
+  float max_value = fpga::filter_find_max(filter);
+  fpga::format_filter(filter, max_value, param->Groups());
   auto filter_ptr = filter->data<int8_t>();
+
+  int element_num_per_div =
+      fpga::get_element_num_per_div(filter, param->Groups());
+  fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
+
+  fpga::format_ofm(out);
+  auto out_ptr = out->mutable_data<half>();
+
   fpga::ConvArgs convArgs;
   convArgs.relu_enabled = relu_enabled;
   convArgs.filter_address = (void *)filter_ptr;
@@ -71,9 +78,9 @@ bool ConvBNReluKernel<FPGA, float>::Init(FusionConvBNReluParam<FPGA> *param) {
   convArgs.image.width = input->dims()[3];
   convArgs.image.pad_height = param->Paddings()[0];
   convArgs.image.pad_width = param->Paddings()[1];
-  convArgs.image.scale_address = input->fpga_args().scale_pointer();
+  convArgs.image.scale_address = input->scale;
   convArgs.output.address = (void *)out_ptr;
-  convArgs.output.scale_address = out->fpga_args().scale_pointer();
+  convArgs.output.scale_address = out->scale;
   param->SetFpgaArgs(convArgs);
   return true;
 }
...
@@ -22,11 +22,12 @@ template <>
 bool ElementwiseAddReluKernel<FPGA, float>::Init(
     ElementwiseAddReluParam<FPGA> *param) {
   bool relu_enabled = true;
-  const Tensor *input_x = param->InputX();
-  const Tensor *input_y = param->InputY();
+  Tensor *input_x = const_cast<Tensor *>(param->InputX());
+  Tensor *input_y = const_cast<Tensor *>(param->InputY());
   Tensor *out = param->Out();
   auto input_x_ptr = input_x->data<half>();
   auto input_y_ptr = input_y->data<half>();
+  fpga::format_ofm(out);
   auto out_ptr = out->mutable_data<half>();

   fpga::EWAddArgs ewaddArgs;
@@ -35,21 +36,19 @@ bool ElementwiseAddReluKernel<FPGA, float>::Init(
   ewaddArgs.const1 = 1;
   ewaddArgs.image0.address = (void *)input_x_ptr;
   ewaddArgs.image0.channels = input_x->dims()[1];
-  ewaddArgs.image0.scale_address =
-      input_x->fpga_args().scale_pointer();  // ew has scale attribute??
+  ewaddArgs.image0.scale_address = input_x->scale;
   ewaddArgs.image0.height = input_x->dims()[2];
   ewaddArgs.image0.width = input_x->dims()[3];
   ewaddArgs.image0.pad_height = 0;
   ewaddArgs.image0.pad_width = 0;
   ewaddArgs.image1.address = (void *)input_y_ptr;
   ewaddArgs.image1.channels = input_y->dims()[1];
-  ewaddArgs.image1.scale_address =
-      input_y->fpga_args().scale_pointer();  // ew has scale attribute??
+  ewaddArgs.image1.scale_address = input_y->scale;
   ewaddArgs.image1.height = input_y->dims()[2];
   ewaddArgs.image1.width = input_y->dims()[3];
   ewaddArgs.image1.pad_height = 0;
   ewaddArgs.image1.pad_width = 0;
-  ewaddArgs.output.scale_address = out->fpga_args().scale_pointer();
+  ewaddArgs.output.scale_address = out->scale;
   ewaddArgs.output.address = (void *)out_ptr;
   param->SetFpgaArgs(ewaddArgs);
   return true;
...
@@ -15,7 +15,6 @@ limitations under the License. */

 #include "operators/kernel/fc_relu_kernel.h"
 #include "fpga/api.h"
-#include "fpga/quantization.h"

 namespace paddle_mobile {
 namespace operators {
@@ -23,26 +22,42 @@ namespace operators {
 template <>
 bool FusionFcReluKernel<FPGA, float>::Init(FusionFcReluParam<FPGA> *param) {
   bool relu_enabled = true;
-  const Tensor *input_x = param->InputX();
+  Tensor *input_x = const_cast<Tensor *>(param->InputX());
   auto input_x_ptr = input_x->data<half>();
   Tensor *input_y = param->InputY();
   const Tensor *input_z = param->InputZ();
   auto input_z_ptr = input_z->data<float>();
   Tensor *out = param->Out();
-  auto out_ptr = out->mutable_data<half>();

   PADDLE_MOBILE_ENFORCE(input_x->dims()[1] == input_y->dims()[0],
                         "Image channel should be equal to weight number");
   int channel = out->dims()[1];
   float *bs_ptr = (float *)fpga::fpga_malloc(2 * channel * sizeof(float));
   for (int i = 0; i < channel; i++) {
-    bs_ptr[i * 2] = 1;
-    bs_ptr[i * 2 + 1] = input_z_ptr[i];
+    bs_ptr[i + channel] = 1;
+    bs_ptr[i] = input_z_ptr[i];
   }

-  fpga::quantize_filter(input_y);
+  int num = input_y->dims()[1];
+  int chw = input_y->dims()[0];
+  PADDLE_MOBILE_ENFORCE(
+      chw == input_x->numel(),
+      "Filter element num should be equal to IFM element num");
+  int height = input_x->dims()[2];
+  int width = input_x->dims()[3];
+  int filter_channel = chw / height / width;
+  input_y->Resize(framework::make_ddim({num, filter_channel, height, width}));
+  float max_value = fpga::filter_find_max(input_y);
+  fpga::format_filter(input_y, max_value, 1);
   auto input_y_ptr = input_y->data<int8_t>();
+
+  int element_num_per_div = fpga::get_element_num_per_div(input_y, 1);
+  fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
+
+  fpga::format_ofm(out);
+  auto out_ptr = out->mutable_data<half>();
+
   fpga::ConvArgs convArgs;
   convArgs.relu_enabled = relu_enabled;
   convArgs.filter_address = (void *)input_y_ptr;
@@ -59,11 +74,9 @@ bool FusionFcReluKernel<FPGA, float>::Init(FusionFcReluParam<FPGA> *param) {
   convArgs.image.width = input_x->dims()[3];
   convArgs.image.pad_height = 0;
   convArgs.image.pad_width = 0;
-  convArgs.image.scale_address =
-      input_x->fpga_args().scale_pointer();  // fc input has scale attribute??
+  convArgs.image.scale_address = input_x->scale;
   convArgs.output.address = (void *)out_ptr;
-  convArgs.output.scale_address =
-      out->fpga_args().scale_pointer();  // fc output has scale attribute??
+  convArgs.output.scale_address = out->scale;
   param->SetFpgaArgs(convArgs);
   return true;
...
@@ -14,7 +14,6 @@ limitations under the License. */
 #ifdef FUSION_FC_OP

 #include "operators/kernel/fusion_fc_kernel.h"
-#include "fpga/quantization.h"

 namespace paddle_mobile {
 namespace operators {
@@ -22,26 +21,42 @@ namespace operators {
 template <>
 bool FusionFcKernel<FPGA, float>::Init(FusionFcParam<FPGA> *param) {
   bool relu_enabled = false;
-  const Tensor *input_x = param->InputX();
+  Tensor *input_x = const_cast<Tensor *>(param->InputX());
   auto input_x_ptr = input_x->data<half>();
   Tensor *input_y = param->InputY();
   const Tensor *input_z = param->InputZ();
   auto input_z_ptr = input_z->data<float>();
   Tensor *out = param->Out();
-  auto out_ptr = out->mutable_data<half>();

   PADDLE_MOBILE_ENFORCE(input_x->dims()[1] == input_y->dims()[0],
                         "Image channel should be equal to weight number");
   int channel = out->dims()[1];
   float *bs_ptr = (float *)fpga::fpga_malloc(2 * channel * sizeof(float));
   for (int i = 0; i < channel; i++) {
-    bs_ptr[i * 2] = 1;
-    bs_ptr[i * 2 + 1] = input_z_ptr[i];
+    bs_ptr[i + channel] = 1;
+    bs_ptr[i] = input_z_ptr[i];
   }

-  fpga::quantize_filter(input_y);
+  int num = input_y->dims()[1];
+  int chw = input_y->dims()[0];
+  PADDLE_MOBILE_ENFORCE(
+      chw == input_x->numel(),
+      "Filter element num should be equal to IFM element num");
+  int height = input_x->dims()[2];
+  int width = input_x->dims()[3];
+  int filter_channel = chw / height / width;
+  input_y->Resize(framework::make_ddim({num, filter_channel, height, width}));
+  float max_value = fpga::filter_find_max(input_y);
+  fpga::format_filter(input_y, max_value, 1);
   auto input_y_ptr = input_y->data<int8_t>();
+
+  int element_num_per_div = fpga::get_element_num_per_div(input_y, 1);
+  fpga::format_bias_scale_array(&bs_ptr, element_num_per_div, channel);
+
+  fpga::format_ofm(out);
+  auto out_ptr = out->mutable_data<half>();
+
   fpga::ConvArgs convArgs;
   convArgs.relu_enabled = relu_enabled;
   convArgs.filter_address = (void *)input_y_ptr;
@@ -58,9 +73,9 @@ bool FusionFcKernel<FPGA, float>::Init(FusionFcParam<FPGA> *param) {
   convArgs.image.width = input_x->dims()[3];
   convArgs.image.pad_height = 0;
   convArgs.image.pad_width = 0;
-  convArgs.image.scale_address = input_x->fpga_args().scale_pointer();
+  convArgs.image.scale_address = input_x->scale;
   convArgs.output.address = (void *)out_ptr;
-  convArgs.output.scale_address = out->fpga_args().scale_pointer();
+  convArgs.output.scale_address = out->scale;
   param->SetFpgaArgs(convArgs);
   return true;
 }
...
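Both FC kernels now reinterpret the 2-D weight matrix as a 4-D conv filter shaped after the input feature map, so the shared filter-formatting path can be reused with group_num = 1. A worked example with assumed shapes (illustrative only, not from the diff):

    // Suppose the flattened IFM has 64 * 7 * 7 = 3136 elements and input_y is {3136, 1000}:
    //   num = 1000, chw = 3136 (equal to input_x->numel(), per the ENFORCE)
    //   height = 7, width = 7, filter_channel = 3136 / 7 / 7 = 64
    //   after Resize, input_y is {1000, 64, 7, 7} and is formatted like a conv filter.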
@@ -21,9 +21,10 @@ namespace operators {
 template <>
 bool PoolKernel<FPGA, float>::Init(PoolParam<FPGA> *param) {
-  const Tensor *input = param->Input();
+  Tensor *input = const_cast<Tensor *>(param->Input());
   auto input_ptr = input->data<half>();
   Tensor *output = param->Output();
+  fpga::format_ofm(output);
   auto output_ptr = output->mutable_data<half>();
   vector<int> ksize = param->Ksize();
   vector<int> strides = param->Strides();
@@ -36,7 +37,9 @@ bool PoolKernel<FPGA, float>::Init(PoolParam<FPGA> *param) {
   poolArgs.image.width = input->dims()[3];
   poolArgs.image.pad_height = paddings[0];
   poolArgs.image.pad_width = paddings[1];
+  poolArgs.image.scale_address = input->scale;
   poolArgs.output.address = output_ptr;
+  poolArgs.output.scale_address = input->scale;
   poolArgs.kernel.height = ksize[0];
   poolArgs.kernel.width = ksize[1];
   poolArgs.kernel.stride_h = strides[0];
...