diff --git a/src/operators/kernel/fpga/V1/feed_kernel.cpp b/src/operators/kernel/fpga/V1/feed_kernel.cpp
index 161d8c9f0cf22ac79d1367e07b8ba3318a7a7123..9c6468404e334a5a3002f8702d4f3b9818028f77 100644
--- a/src/operators/kernel/fpga/V1/feed_kernel.cpp
+++ b/src/operators/kernel/fpga/V1/feed_kernel.cpp
@@ -28,8 +28,8 @@
 template <>
 void FeedKernel<FPGA, float>::Compute(const FeedParam<FPGA> &param) {
   auto input = reinterpret_cast(const_cast(param.InputX()));
-  auto input_ptr = input->data();
   fpga::format_image(input);
+  auto input_ptr = input->data();
   Tensor *output = param.Out();
   auto output_ptr = output->data();
 
diff --git a/src/operators/kernel/fpga/V2/feed_kernel.cpp b/src/operators/kernel/fpga/V2/feed_kernel.cpp
index 4092307083bd38346b03857b8e9ec858795f3941..d1a721b5eb02a775498c17adcae84ce560ac1135 100644
--- a/src/operators/kernel/fpga/V2/feed_kernel.cpp
+++ b/src/operators/kernel/fpga/V2/feed_kernel.cpp
@@ -29,8 +29,8 @@
 template <>
 void FeedKernel<FPGA, float>::Compute(const FeedParam<FPGA> &param) {
   auto input = reinterpret_cast(const_cast(param.InputX()));
-  auto input_ptr = input->data();
   fpga::format_image(input);
+  auto input_ptr = input->data();
   Tensor *output = param.Out();
   auto output_ptr = output->data();
 
diff --git a/test/operators/test_quantize_op.cpp b/test/operators/test_quantize_op.cpp
index 1d3b570818b994b755555bae574c75080150d390..9988661bcb898daa5e79b6d22d65d90cfa03c668 100644
--- a/test/operators/test_quantize_op.cpp
+++ b/test/operators/test_quantize_op.cpp
@@ -27,34 +27,38 @@ enum RoundType {
 }
 
 template <RoundType R>
-static int8_t Round(float x);
+struct Round {
+  int8_t operator()(float x);
+};
 
 template <>
-static int8_t Round<ROUND_NEAREST_AWAY_ZERO>(float x) {
-  return std::round(x);
-}
+struct Round<ROUND_NEAREST_AWAY_ZERO> {
+  int8_t operator()(float x) { return std::round(x); }
+};
 
 template <>
-static int8_t Round<ROUND_NEAREST_TOWARDS_ZERO>(float x) {
-  return int8_t(x);
-}
+struct Round<ROUND_NEAREST_TOWARDS_ZERO> {
+  int8_t operator()(float x) { return int8_t(x); }
+};
 
 template <>
-static int8_t Round<ROUND_NEAREST_TO_EVEN>(float x) {
-  int8_t ret = 0;
-  float v = std::round(x);
-  int32_t q = (int32_t)v;
-  if (abs(abs(q - x) - 0.5) > 0) {
-    ret = q;
-  } else {
-    if (abs(q) % 2 == 0) {
+struct Round<ROUND_NEAREST_TO_EVEN> {
+  int8_t operator()(float x) {
+    int8_t ret = 0;
+    float v = std::round(x);
+    int32_t q = (int32_t)v;
+    if (abs(abs(q - x) - 0.5) > 0) {
       ret = q;
     } else {
-      ret = q + ((q > 0) ? -1 : 1);
+      if (abs(q) % 2 == 0) {
+        ret = q;
+      } else {
+        ret = q + ((q > 0) ? -1 : 1);
+      }
     }
+    return ret;
   }
-  return ret;
-}
+};
 
 template <RoundType R>
 static void quantize(const Tensor *input, const float scale, const int pad,
@@ -70,7 +74,6 @@ static void quantize(const Tensor *input, const float scale, const int pad,
 
   const float *x = input->data<float>();
   int8_t *y = output->mutable_data<int8_t>();
-  std::cout << "pad: " << pad << ", pad_val: " << int(pad_val) << std::endl;
   for (int nc = 0; nc < batch_size * channels; ++nc) {
     const float *xh = x + nc * input_spatial;
     int8_t *yh = y + nc * output_spatial;
@@ -86,7 +89,7 @@ static void quantize(const Tensor *input, const float scale, const int pad,
      yh[w] = pad_val;
     }
     for (int w = 0; w < input_w; ++w) {
-      yh[w + pad] = Round<R>(xh[w] * scale);
+      yh[w + pad] = Round<R>()(xh[w] * scale);
     }
     // pad right
     for (int w = 0; w < pad; ++w) {
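Notes on the changes (not part of the patch):

In both FPGA feed kernels, input->data() is now fetched after fpga::format_image(input), presumably because format_image may re-layout or reallocate the tensor's buffer, which would leave a pointer taken beforehand stale.

In test_quantize_op.cpp, the Round helpers change from static function-template specializations to class-template specializations with operator(), presumably because an explicit specialization of a function template may not carry a storage-class specifier such as static, so the old form fails to compile on stricter compilers. The sketch below is a minimal, self-contained illustration of that functor pattern and of the round-half-to-even branch; the enumerator names and values are assumptions mirroring the reconstruction above and are not verified against the repository, and the tie check uses a small epsilon rather than the test's exact comparison.

```cpp
// Standalone sketch (not part of the patch). RoundType enumerators are
// assumed names, chosen to match the three behaviors exercised by the test.
#include <cmath>
#include <cstdint>
#include <cstdio>

enum RoundType {
  ROUND_NEAREST_AWAY_ZERO = 1,
  ROUND_NEAREST_TOWARDS_ZERO = 2,
  ROUND_NEAREST_TO_EVEN = 3,
};

// Primary template: specializing a class template avoids the restriction on
// declaring explicit specializations of a function template as static.
template <RoundType R>
struct Round {
  int8_t operator()(float x);
};

template <>
struct Round<ROUND_NEAREST_AWAY_ZERO> {
  // std::round already rounds halfway cases away from zero.
  int8_t operator()(float x) { return static_cast<int8_t>(std::round(x)); }
};

template <>
struct Round<ROUND_NEAREST_TOWARDS_ZERO> {
  // Plain float-to-integer conversion truncates toward zero.
  int8_t operator()(float x) { return static_cast<int8_t>(x); }
};

template <>
struct Round<ROUND_NEAREST_TO_EVEN> {
  // Banker's rounding: ties go to the nearest even integer,
  // everything else goes to the nearest integer.
  int8_t operator()(float x) {
    float v = std::round(x);
    int32_t q = static_cast<int32_t>(v);
    if (std::abs(std::abs(q - x) - 0.5f) > 1e-6f) {
      return static_cast<int8_t>(q);  // not a tie
    }
    if (q % 2 == 0) {
      return static_cast<int8_t>(q);  // already even
    }
    return static_cast<int8_t>(q + ((q > 0) ? -1 : 1));  // step back to even
  }
};

int main() {
  const float samples[] = {2.5f, 3.5f, -2.5f, 1.4f, -1.6f};
  for (float x : samples) {
    std::printf("x=%5.2f  away=%4d  trunc=%4d  even=%4d\n", x,
                Round<ROUND_NEAREST_AWAY_ZERO>()(x),
                Round<ROUND_NEAREST_TOWARDS_ZERO>()(x),
                Round<ROUND_NEAREST_TO_EVEN>()(x));
  }
  return 0;
}
```

Built with any C++11 compiler, the sketch maps 2.5, 3.5, -2.5 to 2, 4, -2 under round-to-even, to 3, 4, -3 under round-away-from-zero, and to 2, 3, -2 under truncation, which is the behavioral difference the test's reference quantize() exercises through Round<R>()(xh[w] * scale).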