diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp
index 86f75bc1c1b401cd14f2c6651ea52ef08fdb8c40..cab050e732fb701120c7f1a5c80737fc75282324 100644
--- a/core/general-client/src/general_model.cpp
+++ b/core/general-client/src/general_model.cpp
@@ -345,7 +345,7 @@ int PredictorClient::numpy_predict(
     PredictorRes &predict_res_batch,
     const int &pid) {
   int batch_size = std::max(float_feed_batch.size(), int_feed_batch.size());
-
+  VLOG(2) << "batch size: " << batch_size;
   predict_res_batch.clear();
   Timer timeline;
   int64_t preprocess_start = timeline.TimeStampUS();
@@ -462,7 +462,7 @@
           for (ssize_t j = 0; j < int_array.shape(1); j++) {
             for (ssize_t k = 0; k < int_array.shape(2); k++) {
-              for (ssize_t l = 0; k < int_array.shape(3); l++) {
-                tensor->add_float_data(int_array(i, j, k, l));
+              for (ssize_t l = 0; l < int_array.shape(3); l++) {
+                tensor->add_int64_data(int_array(i, j, k, l));
               }
             }
           }
@@ -474,7 +474,7 @@
         for (ssize_t i = 0; i < int_array.shape(0); i++) {
           for (ssize_t j = 0; j < int_array.shape(1); j++) {
             for (ssize_t k = 0; k < int_array.shape(2); k++) {
-              tensor->add_float_data(int_array(i, j, k));
+              tensor->add_int64_data(int_array(i, j, k));
             }
           }
         }
@@ -484,7 +484,7 @@
         auto int_array = int_feed[vec_idx].unchecked<2>();
         for (ssize_t i = 0; i < int_array.shape(0); i++) {
           for (ssize_t j = 0; j < int_array.shape(1); j++) {
-            tensor->add_float_data(int_array(i, j));
+            tensor->add_int64_data(int_array(i, j));
           }
         }
         break;
@@ -492,7 +492,7 @@
       case 1: {
         auto int_array = int_feed[vec_idx].unchecked<1>();
         for (ssize_t i = 0; i < int_array.shape(0); i++) {
-          tensor->add_float_data(int_array(i));
+          tensor->add_int64_data(int_array(i));
        }
         break;
       }
diff --git a/python/paddle_serving_app/local_predict.py b/python/paddle_serving_app/local_predict.py
index 133aa4ccf32d29538d5b7032874f2c770e55e184..6620994165306a550204498e5185bb3aacca8ffd 100644
--- a/python/paddle_serving_app/local_predict.py
+++ b/python/paddle_serving_app/local_predict.py
@@ -71,6 +71,7 @@ class Debugger(object):
         if profile:
             config.enable_profile()
         config.set_cpu_math_library_num_threads(cpu_num)
+        config.switch_ir_optim(False)
         self.predictor = create_paddle_predictor(config)
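
Note on the client-side change: `PredictorClient::numpy_predict` was appending integer feed values to the tensor's `float_data` field, so an int64 input sent as a numpy array arrived at the server with an empty `int64_data` payload. A minimal sketch of the call path that exercises the fix is below; the config path, endpoint, and feed/fetch names (`words`, `prediction`) are hypothetical placeholders, not taken from the patch.

```python
# Sketch: send an int64 numpy feed through the RPC client, which routes
# through PredictorClient::numpy_predict and now serializes via
# add_int64_data(). Paths, endpoint, and variable names are assumptions.
import numpy as np
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

# A rank-2 int64 feed (e.g. token ids); before the fix these values landed
# in float_data and the server-side int64 tensor stayed empty.
words = np.array([[8, 233, 52, 601]], dtype="int64")
fetch_map = client.predict(feed={"words": words}, fetch=["prediction"])
print(fetch_map)
```

The `local_predict.py` change builds the in-process `Debugger`'s analysis config with `switch_ir_optim(False)`, so local debugging runs skip Paddle's IR graph-optimization passes, presumably to keep the debug path close to the unoptimized graph. A hedged usage sketch, assuming the `load_model_config(model_path, gpu, profile, cpu_num)` signature implied by the surrounding code and a hypothetical model directory:

```python
# Sketch: run a model in-process via the Debugger, whose AnalysisConfig now
# has IR optimization disabled. Model directory and feed/fetch names are
# placeholders borrowed from the uci_housing demo layout.
import numpy as np
from paddle_serving_app.local_predict import Debugger

debugger = Debugger()
debugger.load_model_config("uci_housing_model", gpu=False, profile=False, cpu_num=1)
fetch_map = debugger.predict(
    feed={"x": np.array([[0.0137] * 13], dtype="float32")}, fetch=["price"])
print(fetch_map)
```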