Commit cf3211cd authored by zhangyang0701

change format

Parent 0ab0ff16
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include <iostream>
 #include <fstream>
+#include <iostream>
 #include "../../src/io/paddle_inference_api.h"
 using namespace paddle_mobile;
@@ -39,68 +39,68 @@ void readStream(std::string filename, char *buf) {
 }
 PaddleMobileConfig GetConfig() {
   PaddleMobileConfig config;
   config.precision = PaddleMobileConfig::FP32;
   config.device = PaddleMobileConfig::kFPGA;
   config.prog_file = g_model;
   config.param_file = g_param;
   config.thread_num = 1;
   config.batch_size = 1;
   config.optimize = true;
   config.lod_mode = true;
   config.quantification = false;
   return config;
 }
 int main() {
   open_device();
   PaddleMobileConfig config = GetConfig();
   auto predictor =
       CreatePaddlePredictor<PaddleMobileConfig,
                             PaddleEngineKind::kPaddleMobile>(config);
   std::cout << "after loading model" << std::endl;
   float img_info[3] = {768, 1536, 768.0f / 960.0f};
   int img_length = 768 * 1536 * 3;
   auto img = reinterpret_cast<float *>(fpga_malloc(img_length * sizeof(float)));
   readStream(g_image, reinterpret_cast<char *>(img));
   std::cout << "after initializing data" << std::endl;
   /*
   predictor->FeedData({img_info, img});
   predictor->Predict_From_To(0, -1);
   std::cout << " Finishing predicting " << std::endl;
   std::vector<void *> v(3, nullptr);
   predictor->GetResults(&v);
   int post_nms = 300;
   for (int num = 0; num < post_nms; num ++){
     for (int i = 0; i < 8; i ++){
       std:: cout << ((float*)(v[0]))[num * 8 + i] << std::endl;
     }
   }
   for (int num = 0; num < post_nms; num ++){
     for (int i = 0; i < 8; i ++){
       std:: cout << ((float*)(v[1]))[num * 8 + i] << std::endl;
     }
   }
   for (int num = 0; num < post_nms; num ++){
     for (int i = 0; i < 4; i ++){
       std:: cout << ((float*)(v[2]))[num * 4 + i] << std::endl;
     }
   }
   */
   struct PaddleTensor t_img_info, t_img;
   t_img_info.dtype = FLOAT32;
   t_img_info.layout = LAYOUT_HWC;
-  t_img_info.shape = std::vector<int>({1,3});
+  t_img_info.shape = std::vector<int>({1, 3});
   t_img_info.name = "Image information";
   t_img_info.data.Reset(img_info, 3 * sizeof(float));
   t_img.dtype = FLOAT32;
   t_img.layout = LAYOUT_HWC;
-  t_img.shape = std::vector<int>({1,768, 1536, 3});
+  t_img.shape = std::vector<int>({1, 768, 1536, 3});
   t_img.name = "Image information";
   t_img.data.Reset(img, img_length * sizeof(float));
   predictor->FeedPaddleTensors({t_img_info, t_img});
@@ -112,24 +112,24 @@ int main() {
   std::vector<PaddleTensor> v(3, PaddleTensor());
   predictor->FetchPaddleTensors(&v);
-  auto post_nms = v[0].data.length()/sizeof(float)/8;
-  for (int num = 0; num < post_nms; num ++){
-    for (int i = 0; i < 8; i ++){
-      auto p = reinterpret_cast<float*>(v[0].data.data());
-      std:: cout << p[num * 8 + i] << std::endl;
+  auto post_nms = v[0].data.length() / sizeof(float) / 8;
+  for (int num = 0; num < post_nms; num++) {
+    for (int i = 0; i < 8; i++) {
+      auto p = reinterpret_cast<float *>(v[0].data.data());
+      std::cout << p[num * 8 + i] << std::endl;
     }
   }
-  for (int num = 0; num < post_nms; num ++){
-    for (int i = 0; i < 8; i ++){
-      auto p = reinterpret_cast<float*>(v[1].data.data());
-      std:: cout << p[num * 8 + i] << std::endl;
+  for (int num = 0; num < post_nms; num++) {
+    for (int i = 0; i < 8; i++) {
+      auto p = reinterpret_cast<float *>(v[1].data.data());
+      std::cout << p[num * 8 + i] << std::endl;
     }
   }
-  for (int num = 0; num < post_nms; num ++){
-    for (int i = 0; i < 4; i ++){
-      auto p = reinterpret_cast<float*>(v[2].data.data());
-      std:: cout << p[num * 4 + i] << std::endl;
+  for (int num = 0; num < post_nms; num++) {
+    for (int i = 0; i < 4; i++) {
+      auto p = reinterpret_cast<float *>(v[2].data.data());
+      std::cout << p[num * 4 + i] << std::endl;
     }
   }
   return 0;
 }
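The new side of this diff is consistent with clang-format output under Google style (two-space indent, "num++) {" spacing, "float *" pointer style). A minimal sketch of how such a formatting pass is typically reproduced, assuming a .clang-format file at the repository root and a hypothetical file path, neither of which is shown in this commit:

# .clang-format (assumed, not part of this commit); Google style yields the
# layout seen on the new side of the diff above.
BasedOnStyle: Google

# Hypothetical invocation; the real path of this test file is not shown here:
#   clang-format -i test/fpga/test_api.cpp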