diff --git a/test/fpga/test_mobilenet_api.cpp b/test/fpga/test_mobilenet_api.cpp
index 69965b34c63170ac5e894ede36892e98d0595baa..4b372773937722942b70c584dda1eeb22339841f 100644
--- a/test/fpga/test_mobilenet_api.cpp
+++ b/test/fpga/test_mobilenet_api.cpp
@@ -19,13 +19,8 @@ limitations under the License. */
 #include <fstream>
 #include "../../src/io/paddle_inference_api.h"
 
-<<<<<<< HEAD
-using namespace paddle_mobile;
-using namespace paddle_mobile::fpga;
-=======
 using namespace paddle_mobile;        // NOLINT
 using namespace paddle_mobile::fpga;  // NOLINT
->>>>>>> upstream/develop
 
 static const char *g_image = "../images/mobilenet_txtdata/1.txt";
 static const char *g_model = "../models/keycurve_l2_regular4_model/__model__";
@@ -57,21 +52,12 @@ signed char float_to_int8(float fdata) {
 }
 void quantize(float **data_in, int data_size) {
   float *tmp = *data_in;
-<<<<<<< HEAD
-  signed char *tmp_data =
-      (signed char *)paddle_mobile::fpga::fpga_malloc(data_size * sizeof(char));
-=======
   signed char *tmp_data = (signed char *)fpga_malloc(data_size * sizeof(char));
->>>>>>> upstream/develop
   for (int i = 0; i < data_size; i++) {
     tmp_data[i] = float_to_int8((*data_in)[i] + 128);
   }
   *data_in = (float *)tmp_data;  // NOLINT
-<<<<<<< HEAD
-  paddle_mobile::fpga::fpga_free(tmp);
-=======
   fpga_free(tmp);
->>>>>>> upstream/develop
 }
 
 void convert_to_chw(float **data_in, int channel, int height, int width,
@@ -92,12 +78,8 @@ void dump_stride_float(std::string filename, PaddleTensor input_tensor) {
   int h = (input_tensor.shape)[2];
   int w = (input_tensor.shape)[3];
   int n = (input_tensor.shape)[0];
-<<<<<<< HEAD
-  float *data_tmp = reinterpret_cast<float *>(malloc(c * h * w * sizeof(float)));
-=======
   float *data_tmp =
       reinterpret_cast<float *>(malloc(c * h * w * sizeof(float)));
->>>>>>> upstream/develop
   convert_to_chw(&data_ptr, c, h, w, data_tmp);
   std::ofstream out(filename.c_str());
   float result = 0;
@@ -138,11 +120,7 @@ int main() {
   open_device();
   PaddleMobileConfig config = GetConfig();
   auto predictor =
-<<<<<<< HEAD
-      CreatePaddlePredictor<PaddleMobileConfig,
-=======
       CreatePaddlePredictor<PaddleMobileConfig,
->>>>>>> upstream/develop
                             PaddleEngineKind::kPaddleMobile>(config);
   std::cout << "Finishing loading model" << std::endl;
 
diff --git a/test/fpga/test_yolo_api.cpp b/test/fpga/test_yolo_api.cpp
index 8b4ae775dbd5e236e301db66a18c7387a17e00a7..4ef890506eb1c40638242b9767267756a64da787 100644
--- a/test/fpga/test_yolo_api.cpp
+++ b/test/fpga/test_yolo_api.cpp
@@ -19,13 +19,8 @@ limitations under the License. */
 #include <fstream>
 #include "../../src/io/paddle_inference_api.h"
 
-<<<<<<< HEAD
-using namespace paddle_mobile;
-using namespace paddle_mobile::fpga;
-=======
 using namespace paddle_mobile;        // NOLINT
 using namespace paddle_mobile::fpga;  // NOLINT
->>>>>>> upstream/develop
 
 static const char *g_image = "../images/yolo_test_txtimg/1.txt";
 static const char *g_model = "../models/yolo_bn_l2_model/__model__";
@@ -56,21 +51,12 @@ signed char float_to_int8(float fdata) {
 }
 void quantize(float **data_in, int data_size) {
   float *tmp = *data_in;
-<<<<<<< HEAD
-  signed char *tmp_data =
-      (signed char *)paddle_mobile::fpga::fpga_malloc(data_size * sizeof(char));
-=======
   signed char *tmp_data = (signed char *)fpga_malloc(data_size * sizeof(char));
->>>>>>> upstream/develop
   for (int i = 0; i < data_size; i++) {
     tmp_data[i] = float_to_int8((*data_in)[i] + 128);
   }
   *data_in = (float *)tmp_data;  // NOLINT
-<<<<<<< HEAD
-  paddle_mobile::fpga::fpga_free(tmp);
-=======
   fpga_free(tmp);
->>>>>>> upstream/develop
 }
 
 void convert_to_chw(float **data_in, int channel, int height, int width,
@@ -91,12 +77,8 @@ void dump_stride_float(std::string filename, PaddleTensor input_tensor) {
   int h = (input_tensor.shape)[2];
   int w = (input_tensor.shape)[3];
   int n = (input_tensor.shape)[0];
-<<<<<<< HEAD
-  float *data_tmp = reinterpret_cast<float *>(malloc(c * h * w * sizeof(float)));
-=======
   float *data_tmp =
       reinterpret_cast<float *>(malloc(c * h * w * sizeof(float)));
->>>>>>> upstream/develop
   convert_to_chw(&data_ptr, c, h, w, data_tmp);
   std::ofstream out(filename.c_str());
   float result = 0;