Commit f3d9c641 authored by zhangyang0701

change format

Parent 111d156d
......
@@ -45,65 +45,67 @@ void readStream(std::string filename, char *buf) {
in.close();
}
signed char float_to_int8(float fdata) {
-if (fdata < 0.0) {
-  fdata -= 0.5;
-} else {
-  fdata += 0.5;
-}
-return (signed char)fdata;
+  if (fdata < 0.0) {
+    fdata -= 0.5;
+  } else {
+    fdata += 0.5;
+  }
+  return (signed char)fdata;
}
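Aside: float_to_int8 rounds half away from zero by biasing with ±0.5 before the truncating cast. Below is a minimal standalone sketch of the same rounding rule; the name round_to_int8 and the test values are illustrative, not from the repository.

#include <cassert>

// Bias by +/-0.5 so the truncating cast rounds half away from zero.
signed char round_to_int8(float fdata) {
  fdata += (fdata < 0.0f) ? -0.5f : 0.5f;
  return (signed char)fdata;
}

int main() {
  assert(round_to_int8(1.4f) == 1);    // rounds toward zero
  assert(round_to_int8(1.5f) == 2);    // half rounds away from zero
  assert(round_to_int8(-1.5f) == -2);  // symmetric for negatives
  return 0;
}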
void quantize(float **data_in, int data_size) {
-float *tmp = *data_in;
-signed char *tmp_data =
-  (signed char *)paddle_mobile::fpga::fpga_malloc(data_size * sizeof(char));
-for (int i = 0; i < data_size; i++) {
-  tmp_data[i] = float_to_int8((*data_in)[i] + 128);
-}
-*data_in = (float *)tmp_data; // NOLINT
-paddle_mobile::fpga::fpga_free(tmp);
+  float *tmp = *data_in;
+  signed char *tmp_data =
+      (signed char *)paddle_mobile::fpga::fpga_malloc(data_size * sizeof(char));
+  for (int i = 0; i < data_size; i++) {
+    tmp_data[i] = float_to_int8((*data_in)[i] + 128);
+  }
+  *data_in = (float *)tmp_data;  // NOLINT
+  paddle_mobile::fpga::fpga_free(tmp);
}
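Note that quantize swaps buffers in place: the signed-char result is handed back through the float ** parameter (hence the cast and the NOLINT), and the original float buffer is released with fpga_free. Callers are expected to treat the pointer as int8 data afterwards, as the commented-out t_img.dtypeid = typeid(int8_t) line further down suggests.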
void convert_to_chw(float **data_in, int channel, int height, int width,
float *data_tmp) {
-int64_t amount_per_side = width * height;
-for (int h = 0; h < height; h++) {
-  for (int w = 0; w < width; w++) {
-    for (int c = 0; c < channel; c++) {
-      *(data_tmp + c * amount_per_side + width * h + w) = *((*data_in)++);
-    }
-  }
+  int64_t amount_per_side = width * height;
+  for (int h = 0; h < height; h++) {
+    for (int w = 0; w < width; w++) {
+      for (int c = 0; c < channel; c++) {
+        *(data_tmp + c * amount_per_side + width * h + w) = *((*data_in)++);
+      }
+    }
+  }
}
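convert_to_chw walks the source buffer once in h, w, c order, which implies the input is HWC-ordered, and scatters each value to its CHW position. An equivalent index-based sketch follows; the name hwc_to_chw and the const signature are illustrative, not from the repository.

// Scatter HWC-ordered data into a CHW-ordered destination buffer.
void hwc_to_chw(const float *src, float *dst, int channel, int height,
                int width) {
  for (int h = 0; h < height; h++) {
    for (int w = 0; w < width; w++) {
      for (int c = 0; c < channel; c++) {
        // Destination index: channel plane + row offset + column.
        dst[c * height * width + h * width + w] = *src++;
      }
    }
  }
}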
-void dump_stride_float(std::string filename, paddle_mobile::PaddleTensor input_tensor) {
-  auto data_ptr = reinterpret_cast<float *>(input_tensor.data.data());
-  int c = (input_tensor.shape)[1];
-  int h = (input_tensor.shape)[2];
-  int w = (input_tensor.shape)[3];
-  int n = (input_tensor.shape)[0];
-  float *data_tmp =
-      reinterpret_cast<float *>(malloc(c * h * w * sizeof(float)));
-  //convert_to_chw(&data_ptr, c, h, w, data_tmp);
-  std::ofstream out(filename.c_str());
-  float result = 0;
-  int datasize = abs(c * h * w * n);
-  if (datasize == 0) {
-    std::cout << "wrong dump data size" << std::endl;
-    return;
-  }
-  for (int i = 0; i < datasize; i++) {
-    result = data_ptr[i];
-    out << result << std::endl;
-  }
-  out.close();
+void dump_stride_float(std::string filename,
+                       paddle_mobile::PaddleTensor input_tensor) {
+  auto data_ptr = reinterpret_cast<float *>(input_tensor.data.data());
+  int c = (input_tensor.shape)[1];
+  int h = (input_tensor.shape)[2];
+  int w = (input_tensor.shape)[3];
+  int n = (input_tensor.shape)[0];
+  float *data_tmp =
+      reinterpret_cast<float *>(malloc(c * h * w * sizeof(float)));
+  // convert_to_chw(&data_ptr, c, h, w, data_tmp);
+  std::ofstream out(filename.c_str());
+  float result = 0;
+  int datasize = abs(c * h * w * n);
+  if (datasize == 0) {
+    std::cout << "wrong dump data size" << std::endl;
+    return;
+  }
+  for (int i = 0; i < datasize; i++) {
+    result = data_ptr[i];
+    out << result << std::endl;
+  }
+  out.close();
}
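With the convert_to_chw call commented out, data_tmp is allocated but never used or freed, and the values are written to the file in the tensor's original layout.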
-void dump_stride(std::string filename, paddle_mobile::PaddleTensor input_tensor) {
-  if (input_tensor.dtypeid == typeid(float)) {
-    dump_stride_float(filename, input_tensor);
-  } else {
-    std::cout << "only support dumping float data" << std::endl;
-  }
+void dump_stride(std::string filename,
+                 paddle_mobile::PaddleTensor input_tensor) {
+  if (input_tensor.dtypeid == typeid(float)) {
+    dump_stride_float(filename, input_tensor);
+  } else {
+    std::cout << "only support dumping float data" << std::endl;
+  }
}
PaddleMobileConfig GetConfig() {
PaddleMobileConfig config;
......
@@ -119,64 +121,63 @@ PaddleMobileConfig GetConfig() {
return config;
}
PaddleMobileConfig GetConfig1() {
-PaddleMobileConfig config;
-config.precision = PaddleMobileConfig::FP32;
-config.device = PaddleMobileConfig::kFPGA;
-config.prog_file = g_model1;
-config.param_file = g_param1;
-config.thread_num = 1;
-config.batch_size = 1;
-config.optimize = true;
-config.lod_mode = true;
-config.quantification = false;
-return config;
+  PaddleMobileConfig config;
+  config.precision = PaddleMobileConfig::FP32;
+  config.device = PaddleMobileConfig::kFPGA;
+  config.prog_file = g_model1;
+  config.param_file = g_param1;
+  config.thread_num = 1;
+  config.batch_size = 1;
+  config.optimize = true;
+  config.lod_mode = true;
+  config.quantification = false;
+  return config;
}
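The test drives two predictors: GetConfig1 above configures the first (g_model1/g_param1 on kFPGA, FP32 precision, batch size 1, lod_mode on), and GetConfig, whose body is elided in this diff, configures the second.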
int main() {
open_device();
PaddleMobileConfig config1 = GetConfig1();
auto predictor1 =
-  CreatePaddlePredictor<PaddleMobileConfig,
-  PaddleEngineKind::kPaddleMobile>(config1);
+      CreatePaddlePredictor<PaddleMobileConfig,
+                            PaddleEngineKind::kPaddleMobile>(config1);
std::cout << "Finishing loading model" << std::endl;
-for(int i = 0; i < 1; ++i){
-  int img_length1 = 144 * 14 * 14;
-  auto img1 = reinterpret_cast<float *>(fpga_malloc(img_length1 * sizeof(float)));
-  readStream(g_image1, reinterpret_cast<char *>(img1));
-  std::cout << "Finishing initializing data" << std::endl;
-  struct PaddleTensor t_img1;
-  t_img1.dtypeid = typeid(float);
-  t_img1.layout = LAYOUT_HWC;
-  t_img1.shape = std::vector<int>({1, 14, 14, 144});
-  t_img1.name = "Image information";
-  t_img1.data.Reset(img1, img_length1 * sizeof(float));
-  predictor1->FeedPaddleTensors({t_img1});
-  std::cout << "Finishing feeding data " << std::endl;
-  predictor1->Predict_From_To(0, -1);
-  std::cout << "Finishing predicting " << std::endl;
-  std::vector<paddle_mobile::PaddleTensor> v1; // No need to initialize v
-  predictor1->FetchPaddleTensors(&v1); // Old data in v will be cleared
-  std::cout << "Output number is " << v1.size() << std::endl;
-  for (int fetchNum = 0; fetchNum < v1.size(); fetchNum++) {
+  for (int i = 0; i < 1; ++i) {
+    int img_length1 = 144 * 14 * 14;
+    auto img1 =
+        reinterpret_cast<float *>(fpga_malloc(img_length1 * sizeof(float)));
+    readStream(g_image1, reinterpret_cast<char *>(img1));
+    std::cout << "Finishing initializing data" << std::endl;
+    struct PaddleTensor t_img1;
+    t_img1.dtypeid = typeid(float);
+    t_img1.layout = LAYOUT_HWC;
+    t_img1.shape = std::vector<int>({1, 14, 14, 144});
+    t_img1.name = "Image information";
+    t_img1.data.Reset(img1, img_length1 * sizeof(float));
+    predictor1->FeedPaddleTensors({t_img1});
+    std::cout << "Finishing feeding data " << std::endl;
+    predictor1->Predict_From_To(0, -1);
+    std::cout << "Finishing predicting " << std::endl;
+    std::vector<paddle_mobile::PaddleTensor> v1;  // No need to initialize v
+    predictor1->FetchPaddleTensors(&v1);  // Old data in v will be cleared
+    std::cout << "Output number is " << v1.size() << std::endl;
+    for (int fetchNum = 0; fetchNum < v1.size(); fetchNum++) {
std::string dumpName = "marker2_api_fetch_" + std::to_string(fetchNum);
dump_stride(dumpName, v1[fetchNum]);
}
}
}
/////////////////////////////////////
/////////////////////////////////////
PaddleMobileConfig config = GetConfig();
auto predictor =
-  CreatePaddlePredictor<PaddleMobileConfig,
-  PaddleEngineKind::kPaddleMobile>(config);
+      CreatePaddlePredictor<PaddleMobileConfig,
+                            PaddleEngineKind::kPaddleMobile>(config);
std::cout << "Finishing loading model" << std::endl;
......
@@ -194,14 +195,14 @@ for(int i = 0; i < 1; ++i){
t_img_info.data.Reset(img_info, 3 * sizeof(float));
t_img.dtypeid = typeid(float);
-  //quantize(&img, img_length);
-  //t_img.dtypeid = typeid(int8_t);
+  // quantize(&img, img_length);
+  // t_img.dtypeid = typeid(int8_t);
t_img.layout = LAYOUT_HWC;
t_img.shape = std::vector<int>({1, 432, 1280, 3});
t_img.name = "Image information";
t_img.data.Reset(img, img_length * sizeof(float));
-  //t_img.data.Reset(img, img_length * sizeof(int8_t));
-  // for(int i = 0; i < 100; ++i){
+  // t_img.data.Reset(img, img_length * sizeof(int8_t));
+  // for(int i = 0; i < 100; ++i){
predictor->FeedPaddleTensors({t_img_info, t_img});
std::cout << "Finishing feeding data " << std::endl;
......
@@ -209,8 +210,8 @@ for(int i = 0; i < 1; ++i){
predictor->Predict_From_To(0, -1);
std::cout << "Finishing predicting " << std::endl;
-  std::vector<paddle_mobile::PaddleTensor> v; // No need to initialize v
-  predictor->FetchPaddleTensors(&v); // Old data in v will be cleared
+  std::vector<paddle_mobile::PaddleTensor> v;  // No need to initialize v
+  predictor->FetchPaddleTensors(&v);  // Old data in v will be cleared
std::cout << "Output number is " << v.size() << std::endl;
for (int fetchNum = 0; fetchNum < v.size(); fetchNum++) {
std::string dumpName = "marker_api_fetch_" + std::to_string(fetchNum);
......
......
@@ -19,8 +19,8 @@ limitations under the License. */
#include <iostream>
#include "../../src/io/paddle_inference_api.h"
-using namespace paddle_mobile; //NOLINT
-using namespace paddle_mobile::fpga; //NOLINT
+using namespace paddle_mobile;  // NOLINT
+using namespace paddle_mobile::fpga;  // NOLINT
static const char *g_image = "../images/mobilenet_txtdata/1.txt";
static const char *g_model = "../models/keycurve_l2_regular4_model/__model__";
......
@@ -119,7 +119,9 @@ PaddleMobileConfig GetConfig() {
int main() {
open_device();
PaddleMobileConfig config = GetConfig();
-  auto predictor = CreatePaddlePredictor<paddle_mobile::PaddleMobileConfig, PaddleEngineKind::kPaddleMobile>(config);
+  auto predictor =
+      CreatePaddlePredictor<paddle_mobile::PaddleMobileConfig,
+                            PaddleEngineKind::kPaddleMobile>(config);
std::cout << "Finishing loading model" << std::endl;
int img_length = 256 * 416 * 3;
......
......
@@ -19,8 +19,8 @@ limitations under the License. */
#include <iostream>
#include "../../src/io/paddle_inference_api.h"
-using namespace paddle_mobile; //NOLINT
-using namespace paddle_mobile::fpga; //NOLINT
+using namespace paddle_mobile;  // NOLINT
+using namespace paddle_mobile::fpga;  // NOLINT
static const char *g_image = "../images/yolo_test_txtimg/1.txt";
static const char *g_model = "../models/yolo_bn_l2_model/__model__";
......
@@ -51,8 +51,7 @@ signed char float_to_int8(float fdata) {
}
void quantize(float **data_in, int data_size) {
float *tmp = *data_in;
-  signed char *tmp_data =
-      (signed char *)fpga_malloc(data_size * sizeof(char));
+  signed char *tmp_data = (signed char *)fpga_malloc(data_size * sizeof(char));
for (int i = 0; i < data_size; i++) {
tmp_data[i] = float_to_int8((*data_in)[i] + 128);
}
......
@@ -120,7 +119,9 @@ PaddleMobileConfig GetConfig() {
int main() {
open_device();
PaddleMobileConfig config = GetConfig();
-  auto predictor = CreatePaddlePredictor<PaddleMobileConfig, PaddleEngineKind::kPaddleMobile>(config);
+  auto predictor =
+      CreatePaddlePredictor<PaddleMobileConfig,
+                            PaddleEngineKind::kPaddleMobile>(config);
std::cout << "Finishing loading model" << std::endl;
int img_length = 256 * 416 * 3;
......