Commit d16525f7 authored by qnqinan

update

Parent 538ca2ce
...@@ -59,8 +59,8 @@ bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam<FPGA> *param) {
        static_cast<float>(pow((bn_var_ptr[i] + epsilon), 0.5));
    new_bias_ptr[i] =
        bn_bias_ptr[i] + (bias_ptr[i] - bn_mean_ptr[i]) * new_scale_ptr[i];
    // bs_ptr[i + channel] = new_scale_ptr[i];
    // bs_ptr[i] = new_bias_ptr[i];
    bs_ptr[i + channel] = new_scale_ptr[i] * Si / So * Sf / 127.0;
    bs_ptr[i] = new_bias_ptr[i] * 127.0 / So;
  }
......
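Both BN-fused hunks above fold the batch-norm parameters into a per-channel scale/bias pair and then rescale that pair by the quantization factors Si, So and Sf (taken here to be the float quantization scales of the input, output and filter tensors; the diff excerpt does not define them). A minimal standalone sketch of the loop body under those assumptions, not part of the commit:

```cpp
#include <cmath>

// Hedged sketch: fold batch norm into a per-channel (scale, bias) pair and
// rescale it for a symmetric int8 range of [-127, 127]. Si, So, Sf are assumed
// to be the quantization scales of the input, output and filter tensors;
// bs_ptr packs multipliers in [channel, 2*channel) and biases in [0, channel),
// mirroring the layout used in the diff.
void FoldBNAndQuantize(const float *bn_scale_ptr, const float *bn_bias_ptr,
                       const float *bn_mean_ptr, const float *bn_var_ptr,
                       const float *bias_ptr, float epsilon, float Si,
                       float So, float Sf, int channel, float *bs_ptr) {
  for (int i = 0; i < channel; i++) {
    // BN folded into conv:  y = s * (x - mean) + b,  s = gamma / sqrt(var + eps)
    float new_scale =
        bn_scale_ptr[i] / std::sqrt(bn_var_ptr[i] + epsilon);
    float new_bias =
        bn_bias_ptr[i] + (bias_ptr[i] - bn_mean_ptr[i]) * new_scale;
    bs_ptr[i + channel] = new_scale * Si / So * Sf / 127.0f;  // multiplier
    bs_ptr[i] = new_bias * 127.0f / So;  // bias in quantized output units
  }
}
```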
...@@ -59,8 +59,8 @@ bool ConvAddBNReluKernel<FPGA, float>::Init( ...@@ -59,8 +59,8 @@ bool ConvAddBNReluKernel<FPGA, float>::Init(
static_cast<float>(pow((bn_var_ptr[i] + epsilon), 0.5)); static_cast<float>(pow((bn_var_ptr[i] + epsilon), 0.5));
new_bias_ptr[i] = new_bias_ptr[i] =
bn_bias_ptr[i] + (bias_ptr[i] - bn_mean_ptr[i]) * new_scale_ptr[i]; bn_bias_ptr[i] + (bias_ptr[i] - bn_mean_ptr[i]) * new_scale_ptr[i];
// bs_ptr[i + channel] = new_scale_ptr[i]; // bs_ptr[i + channel] = new_scale_ptr[i];
// bs_ptr[i] = new_bias_ptr[i]; // bs_ptr[i] = new_bias_ptr[i];
bs_ptr[i + channel] = new_scale_ptr[i] * Si / So * Sf / 127.0; bs_ptr[i + channel] = new_scale_ptr[i] * Si / So * Sf / 127.0;
bs_ptr[i] = new_bias_ptr[i] * 127.0 / So; bs_ptr[i] = new_bias_ptr[i] * 127.0 / So;
} }
......
...@@ -40,8 +40,8 @@ bool ConvAddKernel<FPGA, float>::Init(FusionConvAddParam<FPGA> *param) {
  auto bs_ptr =
      (float *)fpga::fpga_malloc(2 * channel * sizeof(float));  // NOLINT
  for (int i = 0; i < channel; i++) {
    // bs_ptr[i + channel] = 1;
    // bs_ptr[i] = bias_ptr[i];
    bs_ptr[i + channel] = Si / So * Sf / 127.0;
    bs_ptr[i] = bias_ptr[i] * 127.0 / So;
  }
......
...@@ -40,8 +40,8 @@ bool ConvAddReluKernel<FPGA, float>::Init(FusionConvAddReluParam<FPGA> *param) {
  auto bs_ptr =
      (float *)fpga::fpga_malloc(2 * channel * sizeof(float));  // NOLINT
  for (int i = 0; i < channel; i++) {
    // bs_ptr[i + channel] = 1;
    // bs_ptr[i] = bias_ptr[i];
    bs_ptr[i + channel] = Si / So * Sf / 127.0;
    bs_ptr[i] = bias_ptr[i] * 127.0 / So;
  }
......
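The ConvAdd and ConvAddRelu hunks write the same two factors without the BN fold: a per-channel multiplier Si * Sf / (127 * So) and a bias converted into quantized output units. One way to read those constants is as inputs to a per-channel requantization step; the sketch below is an assumption about how they would be consumed, with the accumulator type and clamping chosen for illustration rather than taken from the repository:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Hedged sketch: with symmetric quantization q = round(x * 127 / S), an int32
// conv accumulator acc = sum(wq * xq) ~= conv_fp * 127 * 127 / (Sf * Si).
// Multiplying by Si * Sf / (127 * So) and adding bias * 127 / So then yields
// the result directly in int8 output units of scale So, which is what the
// per-channel bs_ptr entries written in the diff provide.
int8_t Requantize(int32_t acc, float channel_scale, float channel_bias) {
  float y = acc * channel_scale + channel_bias;
  int q = static_cast<int>(std::round(y));
  return static_cast<int8_t>(std::min(127, std::max(-127, q)));
}
```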