diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp
index d4e54c2ac04cf84b2a036f7abe0d426e6f186699..613abf9233610d170bce4386798662f78887edf7 100644
--- a/core/general-client/src/general_model.cpp
+++ b/core/general-client/src/general_model.cpp
@@ -285,22 +285,16 @@ int PredictorClient::batch_predict(
       // int idx = _fetch_name_to_idx[name];
       if (_fetch_name_to_type[name] == 0) {
         VLOG(2) << "ferch var " << name << "type int";
-        model._int64_value_map[name].resize(
-            output.insts(0).tensor_array(idx).int64_data_size());
         int size = output.insts(0).tensor_array(idx).int64_data_size();
-        for (int i = 0; i < size; ++i) {
-          model._int64_value_map[name][i] =
-              output.insts(0).tensor_array(idx).int64_data(i);
-        }
+        model._int64_value_map[name] = std::vector<int64_t>(
+            output.insts(0).tensor_array(idx).int64_data().begin(),
+            output.insts(0).tensor_array(idx).int64_data().begin() + size);
       } else {
         VLOG(2) << "fetch var " << name << "type float";
-        model._float_value_map[name].resize(
-            output.insts(0).tensor_array(idx).float_data_size());
         int size = output.insts(0).tensor_array(idx).float_data_size();
-        for (int i = 0; i < size; ++i) {
-          model._float_value_map[name][i] =
-              output.insts(0).tensor_array(idx).float_data(i);
-        }
+        model._float_value_map[name] = std::vector<float>(
+            output.insts(0).tensor_array(idx).float_data().begin(),
+            output.insts(0).tensor_array(idx).float_data().begin() + size);
       }
       idx += 1;
     }
@@ -564,22 +558,16 @@ int PredictorClient::numpy_predict(
       // int idx = _fetch_name_to_idx[name];
       if (_fetch_name_to_type[name] == 0) {
         VLOG(2) << "ferch var " << name << "type int";
-        model._int64_value_map[name].resize(
-            output.insts(0).tensor_array(idx).int64_data_size());
         int size = output.insts(0).tensor_array(idx).int64_data_size();
-        for (int i = 0; i < size; ++i) {
-          model._int64_value_map[name][i] =
-              output.insts(0).tensor_array(idx).int64_data(i);
-        }
+        model._int64_value_map[name] = std::vector<int64_t>(
+            output.insts(0).tensor_array(idx).int64_data().begin(),
+            output.insts(0).tensor_array(idx).int64_data().begin() + size);
       } else {
         VLOG(2) << "fetch var " << name << "type float";
-        model._float_value_map[name].resize(
-            output.insts(0).tensor_array(idx).float_data_size());
         int size = output.insts(0).tensor_array(idx).float_data_size();
-        for (int i = 0; i < size; ++i) {
-          model._float_value_map[name][i] =
-              output.insts(0).tensor_array(idx).float_data(i);
-        }
+        model._float_value_map[name] = std::vector<float>(
+            output.insts(0).tensor_array(idx).float_data().begin(),
+            output.insts(0).tensor_array(idx).float_data().begin() + size);
       }
       idx += 1;
     }
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index 58ae09bc7bb3b4e1239e9eca58e325c981158bff..63f827167de6417a15097d0ea2c7834e7fbf2d20 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -391,7 +391,13 @@ class MultiLangClient(object):
         self._parse_model_config(path)
 
     def connect(self, endpoint):
-        self.channel_ = grpc.insecure_channel(endpoint[0]) #TODO
+        # https://github.com/tensorflow/serving/issues/1382
+        options = [('grpc.max_receive_message_length', 512 * 1024 * 1024),
+                   ('grpc.max_send_message_length', 512 * 1024 * 1024),
+                   ('grpc.max_receive_message_length', 512 * 1024 * 1024)]
+
+        self.channel_ = grpc.insecure_channel(
+            endpoint[0], options=options) #TODO
         self.stub_ = multi_lang_general_model_service_pb2_grpc.MultiLangGeneralModelServiceStub(
             self.channel_)
 
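
Note on the two C++ hunks: the patch replaces the resize()-then-copy loop with range construction of the std::vector directly from the repeated field's iterators, copying the data in a single pass instead of default-initializing every element and then overwriting it. Since size is exactly int64_data_size() (resp. float_data_size()), the begin() + size endpoint is equivalent to end(). A minimal standalone sketch of the same pattern, assuming a google::protobuf::RepeatedField<int64_t> such as the generated int64_data() accessor returns; CopyInt64Field is a hypothetical helper name, not part of the patch:

    #include <cstdint>
    #include <vector>

    #include <google/protobuf/repeated_field.h>

    // Copy a repeated int64 protobuf field into a std::vector in one pass,
    // range-constructing from the field's begin()/end() iterators.
    std::vector<int64_t> CopyInt64Field(
        const google::protobuf::RepeatedField<int64_t>& field) {
      return std::vector<int64_t>(field.begin(), field.end());
    }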
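
Note on the Python hunk: gRPC rejects messages above a default size limit (4 MB on the receive side), so large tensor payloads would otherwise fail; the channel options raise both the send and receive limits to 512 MB, per the linked tensorflow/serving issue. The third tuple repeats grpc.max_receive_message_length with the same value as the first, so it is redundant but harmless. On a C++ gRPC client the equivalent knob is grpc::ChannelArguments; a sketch under that assumption (MakeLargeMessageChannel is a hypothetical helper, and the 512 MB figure simply mirrors the Python options above):

    #include <memory>
    #include <string>

    #include <grpcpp/grpcpp.h>

    // Build an insecure channel whose send/receive message size limits are
    // raised to 512 MB, mirroring the Python client options in the diff.
    std::shared_ptr<grpc::Channel> MakeLargeMessageChannel(
        const std::string& endpoint) {
      grpc::ChannelArguments args;
      args.SetMaxReceiveMessageSize(512 * 1024 * 1024);
      args.SetMaxSendMessageSize(512 * 1024 * 1024);
      return grpc::CreateCustomChannel(
          endpoint, grpc::InsecureChannelCredentials(), args);
    }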