From b9ce0794b068c8e73a17139fc2a40c46e4177893 Mon Sep 17 00:00:00 2001
From: qinan
Date: Wed, 8 May 2019 15:43:07 +0800
Subject: [PATCH] update io file to support int8 tensor feed and fetch
 fixed#1602 (#1603)

* update concat and split kernel and related files in FPGA v2(v3) track
* update
* update
* update kernel and related files in FPGA v2 track
* update
* update
* update kernel and related files for static quantization in FPGA v2 track
* update
* update feed and fetch kernel in FPGA v2 track
* update io file
---
 src/fpga/V2/api.cpp                          |  3 +--
 src/io/api_paddle_mobile.cc                  | 11 +++++++++--
 src/operators/kernel/fpga/V2/feed_kernel.cpp |  1 +
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/src/fpga/V2/api.cpp b/src/fpga/V2/api.cpp
index 38538087cd..be95caa5d8 100644
--- a/src/fpga/V2/api.cpp
+++ b/src/fpga/V2/api.cpp
@@ -32,7 +32,7 @@ void format_image(framework::Tensor *image_tensor) {
   int8_t *p_data = external_ptr == nullptr ? data_ptr : external_ptr;
 
   image::format_image(&p_data, channel, height, width);
-  if (p_data != data_ptr && external_ptr == nullptr) {
+  if (p_data != data_ptr) {
     image_tensor->reset_data_ptr(p_data);
   }
 }
@@ -43,7 +43,6 @@ void format_ofm(framework::Tensor *ofm_tensor) {
   } else {
     format_int8_ofm(ofm_tensor);
   }
-  format_int8_ofm(ofm_tensor);
 }
 
 void format_int8_ofm(framework::Tensor *ofm_tensor) {
diff --git a/src/io/api_paddle_mobile.cc b/src/io/api_paddle_mobile.cc
index e7782de201..d19120739e 100644
--- a/src/io/api_paddle_mobile.cc
+++ b/src/io/api_paddle_mobile.cc
@@ -131,9 +131,12 @@ void ConvertTensors(const framework::Tensor &src, PaddleTensor *des) {
 
   if (src.type() == type_id<float>()) {
     des->data.Reset(const_cast<float *>(src.data<float>()),
                     num * sizeof(float));
-  } else {
+  } else if (src.type() == type_id<int16_t>()) {
     des->data.Reset(const_cast<int16_t *>(src.data<int16_t>()),
                     num * sizeof(int16_t));
+  } else {
+    des->data.Reset(const_cast<int8_t *>(src.data<int8_t>()),
+                    num * sizeof(int8_t));
   }
 }
 
@@ -143,7 +146,11 @@ void PaddleMobilePredictor<Device, T>::FeedPaddleTensors(
   auto num = inputs.size();
   std::vector<framework::Tensor> tensors(num, framework::Tensor());
   for (int i = 0; i < num; i++) {
-    tensors[i].init(type_id<float>().hash_code());
+    if (inputs[i].dtypeid == type_id<int8_t>().hash_code()) {
+      tensors[i].init(type_id<int8_t>().hash_code());
+    } else {
+      tensors[i].init(type_id<float>().hash_code());
+    }
     ConvertPaddleTensors(inputs[i], &tensors[i]);
   }
   paddle_mobile_->FeedTensorData(tensors);
diff --git a/src/operators/kernel/fpga/V2/feed_kernel.cpp b/src/operators/kernel/fpga/V2/feed_kernel.cpp
index ec47dca386..a706c48e12 100644
--- a/src/operators/kernel/fpga/V2/feed_kernel.cpp
+++ b/src/operators/kernel/fpga/V2/feed_kernel.cpp
@@ -44,6 +44,7 @@ void FeedKernel<FPGA, float>::Compute(const FeedParam<FPGA> &param) {
   }
   fpga::format_image(input);
   output->ShareDataWith(*input);
+  input->external_data = nullptr;
 }
 
 template class FeedKernel<FPGA, float>;
-- 
GitLab
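
For context, the sketch below illustrates how a caller might hand a pre-quantized int8 buffer to the feed path this patch enables: setting PaddleTensor::dtypeid to the int8 type id makes FeedPaddleTensors() initialize the internal tensor as int8, and data.Reset() passes the raw buffer through, matching the new branch in ConvertTensors(). This is a minimal, hedged illustration rather than code from the patch; the include path, the shape field, the namespace, and the templated predictor parameter are assumptions that may not match the actual paddle-mobile API, and FeedQuantizedInput is a hypothetical helper.

// Usage sketch under the assumptions stated above.
#include <cstddef>
#include <cstdint>
#include <vector>

#include "io/paddle_inference_api.h"  // assumed header exposing PaddleTensor / type_id

using namespace paddle_mobile;  // assumed namespace of PaddleTensor and type_id

template <typename Predictor>
void FeedQuantizedInput(Predictor *predictor, int8_t *quant_data) {
  PaddleTensor in;
  in.shape = {1, 3, 224, 224};  // assumed field; example NCHW shape
  const size_t numel = 1 * 3 * 224 * 224;

  // The patch compares dtypeid against type_id<int8_t>().hash_code(), so we
  // store the same value here to select the new int8 branch in
  // FeedPaddleTensors().
  in.dtypeid = type_id<int8_t>().hash_code();

  // Hand the pre-quantized buffer to the tensor, sized in bytes, as the new
  // int8 case in ConvertTensors() expects.
  in.data.Reset(quant_data, numel * sizeof(int8_t));

  std::vector<PaddleTensor> inputs{in};
  predictor->FeedPaddleTensors(inputs);
}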