diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp
index 04bc4169b63bac7aeb28c70cbbcffd202cc2d199..79d380f6c9a7b7b2032e657a3914efb4b50c4aae 100644
--- a/core/general-client/src/general_model.cpp
+++ b/core/general-client/src/general_model.cpp
@@ -367,7 +367,7 @@ int PredictorClient::batch_predict(
   res.Clear();
   if (_predictor->inference(&req, &res) != 0) {
     LOG(ERROR) << "failed call predictor with req: " << req.ShortDebugString();
-    exit(-1);
+    return -1;
   } else {
     client_infer_end = timeline.TimeStampUS();
     postprocess_start = client_infer_end;
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index 1e59ec6d406255d068119a8c92bd02770731967f..f3d6a9a661e494bbf8f3ea9995c8e9139fd102d5 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -239,6 +239,9 @@ class Client(object):
             float_slot_batch, float_feed_names, int_slot_batch, int_feed_names,
             fetch_names, result_batch, self.pid)
 
+        if res == -1:
+            return None
+
         result_map_batch = []
         result_map = {}
         for i, name in enumerate(fetch_names):
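
Note on the behavior change: previously a failed inference RPC called exit(-1) inside the C++ client, terminating the entire caller process. With this patch, PredictorClient::batch_predict propagates -1 instead, and the Python Client.batch_predict surfaces that as None, so callers must now check the return value. A minimal caller-side sketch of that check follows; the config path, endpoint, and feed/fetch names are illustrative assumptions, not part of this diff.

# Sketch of handling the new failure mode in caller code.
# The config path, endpoint, and feed/fetch names below are hypothetical.
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf.prototxt")  # assumed path
client.connect(["127.0.0.1:9292"])  # assumed endpoint

feed_batch = [{"words": [1, 2, 3]}]  # hypothetical feed name and values

result = client.batch_predict(feed_batch=feed_batch, fetch=["prediction"])
if result is None:
    # The RPC failed; before this patch the process would have exited here.
    # The caller can now log the failure and retry or skip the batch.
    print("batch_predict failed; skipping this batch")
else:
    print(result)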