Commit e22f59d7 authored by hjchen2

Merge branch 'develop' of https://github.com/PaddlePaddle/paddle-mobile into dev-latest

@@ -28,8 +28,8 @@ template <>
 void FeedKernel<FPGA, float>::Compute(const FeedParam<FPGA> &param) {
   auto input =
       reinterpret_cast<Tensor *>(const_cast<LoDTensor *>(param.InputX()));
-  auto input_ptr = input->data<float>();
   fpga::format_image(input);
+  auto input_ptr = input->data<float>();
   Tensor *output = param.Out();
   auto output_ptr = output->data<float>();
...
@@ -29,8 +29,8 @@ template <>
 void FeedKernel<FPGA, float>::Compute(const FeedParam<FPGA> &param) {
   auto input =
       reinterpret_cast<Tensor *>(const_cast<LoDTensor *>(param.InputX()));
-  auto input_ptr = input->data<float>();
   fpga::format_image(input);
+  auto input_ptr = input->data<float>();
   Tensor *output = param.Out();
   auto output_ptr = output->data<float>();
...
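Both hunks above make the same change: the raw `float` pointer is now taken after `fpga::format_image(input)` instead of before it. The reorder matters if the formatting step can reallocate or repack the tensor's buffer (an assumption read from the call order, not something the diff states); a pointer fetched earlier would then refer to the old storage. A self-contained analogy, with `std::vector` standing in for the tensor and `format` for `fpga::format_image`:

```cpp
#include <cstdio>
#include <vector>

// Stand-in for fpga::format_image: it may grow/reallocate the storage,
// which invalidates any raw pointer taken beforehand.
void format(std::vector<float> &buf) { buf.resize(buf.size() * 4, 0.5f); }

int main() {
  std::vector<float> tensor(8, 1.0f);
  // float *stale = tensor.data();  // unsafe: may dangle after format()
  format(tensor);                   // may reallocate
  float *ptr = tensor.data();       // safe: fetched after formatting
  std::printf("%zu %f\n", tensor.size(), static_cast<double>(ptr[0]));
  return 0;
}
```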
@@ -27,20 +27,23 @@ enum RoundType {
 }
 
 template <round::RoundType T>
-static int8_t Round(float x);
+struct Round {
+  int8_t operator()(float x);
+};
 
 template <>
-static int8_t Round<round::RoundAwayZero>(float x) {
-  return std::round(x);
-}
+struct Round<round::RoundAwayZero> {
+  int8_t operator()(float x) { return std::round(x); }
+};
 
 template <>
-static int8_t Round<round::RoundTowardsZero>(float x) {
-  return int8_t(x);
-}
+struct Round<round::RoundTowardsZero> {
+  int8_t operator()(float x) { return int8_t(x); }
+};
 
 template <>
-static int8_t Round<round::RoundToEven>(float x) {
+struct Round<round::RoundToEven> {
+  int8_t operator()(float x) {
   int8_t ret = 0;
   float v = std::round(x);
   int32_t q = (int32_t)v;
@@ -54,7 +57,8 @@ static int8_t Round<round::RoundToEven>(float x) {
     }
   }
   return ret;
 }
+};
 
 template <round::RoundType T>
 static void quantize(const Tensor *input, const float scale, const int pad,
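The hunks above replace the explicitly specialized `Round` function templates with a class template plus specializations that expose an `operator()`. A likely motivation, not stated in the commit: a storage-class specifier is not allowed on an explicit specialization, so `template <> static int8_t Round<...>(float)` is ill-formed, while struct specializations with an inline `operator()` compile cleanly in a header. A minimal, self-contained sketch of the pattern (names simplified, only two rounding modes shown):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

enum RoundType { RoundAwayZero, RoundTowardsZero };

// Primary template intentionally left undefined; only specializations exist.
template <RoundType T>
struct Round;

template <>
struct Round<RoundAwayZero> {
  int8_t operator()(float x) { return static_cast<int8_t>(std::round(x)); }
};

template <>
struct Round<RoundTowardsZero> {
  int8_t operator()(float x) { return static_cast<int8_t>(x); }
};

int main() {
  // The extra () constructs a temporary functor, then calls operator().
  std::printf("%d %d\n", Round<RoundAwayZero>()(2.6f),
              Round<RoundTowardsZero>()(2.6f));
  return 0;
}
```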
@@ -70,7 +74,6 @@ static void quantize(const Tensor *input, const float scale, const int pad,
   const float *x = input->data<const float>();
   int8_t *y = output->mutable_data<int8_t>();
-  std::cout << "pad: " << pad << ", pad_val: " << int(pad_val) << std::endl;
   for (int nc = 0; nc < batch_size * channels; ++nc) {
     const float *xh = x + nc * input_spatial;
     int8_t *yh = y + nc * output_spatial;
@@ -86,7 +89,7 @@ static void quantize(const Tensor *input, const float scale, const int pad,
        yh[w] = pad_val;
      }
      for (int w = 0; w < input_w; ++w) {
-       yh[w + pad] = Round<T>(xh[w] * scale);
+       yh[w + pad] = Round<T>()(xh[w] * scale);
      }
      // pad right
      for (int w = 0; w < pad; ++w) {
...
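At the call site, `Round<T>` is now a type rather than a function, so `Round<T>()(xh[w] * scale)` first value-initializes a temporary functor and then invokes its `operator()` on the scaled value. With the `operator()` defined inline, this should optimize to the same code as the old direct call, though that is an expectation rather than something measured in this commit.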