Commit 4ea56919 authored by qnqinan

sync code with remote

Parent 397bb881
@@ -19,13 +19,8 @@ limitations under the License. */
 #include <iostream>
 #include "../../src/io/paddle_inference_api.h"
-<<<<<<< HEAD
-using namespace paddle_mobile;
-using namespace paddle_mobile::fpga;
-=======
 using namespace paddle_mobile;        // NOLINT
 using namespace paddle_mobile::fpga;  // NOLINT
->>>>>>> upstream/develop
 static const char *g_image = "../images/mobilenet_txtdata/1.txt";
 static const char *g_model = "../models/keycurve_l2_regular4_model/__model__";
@@ -57,21 +52,12 @@ signed char float_to_int8(float fdata) {
 }
 void quantize(float **data_in, int data_size) {
   float *tmp = *data_in;
-<<<<<<< HEAD
-  signed char *tmp_data =
-      (signed char *)paddle_mobile::fpga::fpga_malloc(data_size * sizeof(char));
-=======
   signed char *tmp_data = (signed char *)fpga_malloc(data_size * sizeof(char));
->>>>>>> upstream/develop
   for (int i = 0; i < data_size; i++) {
     tmp_data[i] = float_to_int8((*data_in)[i] + 128);
   }
   *data_in = (float *)tmp_data;  // NOLINT
-<<<<<<< HEAD
-  paddle_mobile::fpga::fpga_free(tmp);
-=======
   fpga_free(tmp);
->>>>>>> upstream/develop
 }
 void convert_to_chw(float **data_in, int channel, int height, int width,
@@ -92,12 +78,8 @@ void dump_stride_float(std::string filename, PaddleTensor input_tensor) {
   int h = (input_tensor.shape)[2];
   int w = (input_tensor.shape)[3];
   int n = (input_tensor.shape)[0];
-<<<<<<< HEAD
-  float *data_tmp = reinterpret_cast<float *>(malloc(c * h * w * sizeof(float)));
-=======
   float *data_tmp =
       reinterpret_cast<float *>(malloc(c * h * w * sizeof(float)));
->>>>>>> upstream/develop
   convert_to_chw(&data_ptr, c, h, w, data_tmp);
   std::ofstream out(filename.c_str());
   float result = 0;
@@ -138,11 +120,7 @@ int main() {
   open_device();
   PaddleMobileConfig config = GetConfig();
   auto predictor =
-<<<<<<< HEAD
-      CreatePaddlePredictor<PaddleMobileConfig,
-=======
       CreatePaddlePredictor<paddle_mobile::PaddleMobileConfig,
->>>>>>> upstream/develop
                             PaddleEngineKind::kPaddleMobile>(config);
   std::cout << "Finishing loading model" << std::endl;
......
@@ -19,13 +19,8 @@ limitations under the License. */
 #include <iostream>
 #include "../../src/io/paddle_inference_api.h"
-<<<<<<< HEAD
-using namespace paddle_mobile;
-using namespace paddle_mobile::fpga;
-=======
 using namespace paddle_mobile;        // NOLINT
 using namespace paddle_mobile::fpga;  // NOLINT
->>>>>>> upstream/develop
 static const char *g_image = "../images/yolo_test_txtimg/1.txt";
 static const char *g_model = "../models/yolo_bn_l2_model/__model__";
@@ -56,21 +51,12 @@ signed char float_to_int8(float fdata) {
 }
 void quantize(float **data_in, int data_size) {
   float *tmp = *data_in;
-<<<<<<< HEAD
-  signed char *tmp_data =
-      (signed char *)paddle_mobile::fpga::fpga_malloc(data_size * sizeof(char));
-=======
   signed char *tmp_data = (signed char *)fpga_malloc(data_size * sizeof(char));
->>>>>>> upstream/develop
   for (int i = 0; i < data_size; i++) {
     tmp_data[i] = float_to_int8((*data_in)[i] + 128);
   }
   *data_in = (float *)tmp_data;  // NOLINT
-<<<<<<< HEAD
-  paddle_mobile::fpga::fpga_free(tmp);
-=======
   fpga_free(tmp);
->>>>>>> upstream/develop
 }
 void convert_to_chw(float **data_in, int channel, int height, int width,
@@ -91,12 +77,8 @@ void dump_stride_float(std::string filename, PaddleTensor input_tensor) {
   int h = (input_tensor.shape)[2];
   int w = (input_tensor.shape)[3];
   int n = (input_tensor.shape)[0];
-<<<<<<< HEAD
-  float *data_tmp = reinterpret_cast<float *>(malloc(c * h * w * sizeof(float)));
-=======
   float *data_tmp =
       reinterpret_cast<float *>(malloc(c * h * w * sizeof(float)));
->>>>>>> upstream/develop
   convert_to_chw(&data_ptr, c, h, w, data_tmp);
   std::ofstream out(filename.c_str());
   float result = 0;
......
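For reference, the quantize() helper kept from upstream/develop in both tests shifts each float by +128, narrows it to a signed char in a freshly allocated buffer, and frees the original float buffer. The standalone sketch below approximates that flow on a host machine; it is not part of this commit, substitutes std::malloc/std::free for fpga_malloc/fpga_free, and uses a hypothetical clamping stand-in for float_to_int8, whose body is elided in this diff.

// Standalone sketch, not part of this commit: approximates the resolved
// quantize() flow, with std::malloc/std::free standing in for
// fpga_malloc/fpga_free.
#include <cstdlib>
#include <iostream>

// Hypothetical conversion; the real float_to_int8 body is not shown in this
// diff and may map values differently.
static signed char to_int8_sketch(float v) {
  if (v > 127.0f) v = 127.0f;
  if (v < -128.0f) v = -128.0f;
  return static_cast<signed char>(v);
}

// Mirrors quantize(): shift each float by +128, narrow to int8 into a new
// buffer, repoint the caller's pointer at the int8 data, free the old buffer.
static void quantize_sketch(float **data_in, int data_size) {
  float *tmp = *data_in;
  signed char *tmp_data =
      static_cast<signed char *>(std::malloc(data_size * sizeof(char)));
  for (int i = 0; i < data_size; i++) {
    tmp_data[i] = to_int8_sketch((*data_in)[i] + 128);
  }
  *data_in = reinterpret_cast<float *>(tmp_data);
  std::free(tmp);
}

int main() {
  float *data = static_cast<float *>(std::malloc(4 * sizeof(float)));
  data[0] = -128.0f; data[1] = -96.5f; data[2] = -64.0f; data[3] = -1.0f;
  quantize_sketch(&data, 4);
  const signed char *q = reinterpret_cast<const signed char *>(data);
  for (int i = 0; i < 4; i++) std::cout << static_cast<int>(q[i]) << " ";
  std::cout << std::endl;  // prints: 0 31 64 127
  std::free(data);
  return 0;
}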