Commit b8907700 authored by barrierye

Merge branch 'develop' of https://github.com/PaddlePaddle/Serving into develop

@@ -285,22 +285,16 @@ int PredictorClient::batch_predict(
     // int idx = _fetch_name_to_idx[name];
     if (_fetch_name_to_type[name] == 0) {
       VLOG(2) << "fetch var " << name << " type int";
-      model._int64_value_map[name].resize(
-          output.insts(0).tensor_array(idx).int64_data_size());
       int size = output.insts(0).tensor_array(idx).int64_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._int64_value_map[name][i] =
-            output.insts(0).tensor_array(idx).int64_data(i);
-      }
+      model._int64_value_map[name] = std::vector<int64_t>(
+          output.insts(0).tensor_array(idx).int64_data().begin(),
+          output.insts(0).tensor_array(idx).int64_data().begin() + size);
     } else {
       VLOG(2) << "fetch var " << name << " type float";
-      model._float_value_map[name].resize(
-          output.insts(0).tensor_array(idx).float_data_size());
       int size = output.insts(0).tensor_array(idx).float_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._float_value_map[name][i] =
-            output.insts(0).tensor_array(idx).float_data(i);
-      }
+      model._float_value_map[name] = std::vector<float>(
+          output.insts(0).tensor_array(idx).float_data().begin(),
+          output.insts(0).tensor_array(idx).float_data().begin() + size);
     }
     idx += 1;
   }
@@ -564,22 +558,16 @@ int PredictorClient::numpy_predict(
     // int idx = _fetch_name_to_idx[name];
     if (_fetch_name_to_type[name] == 0) {
       VLOG(2) << "fetch var " << name << " type int";
-      model._int64_value_map[name].resize(
-          output.insts(0).tensor_array(idx).int64_data_size());
       int size = output.insts(0).tensor_array(idx).int64_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._int64_value_map[name][i] =
-            output.insts(0).tensor_array(idx).int64_data(i);
-      }
+      model._int64_value_map[name] = std::vector<int64_t>(
+          output.insts(0).tensor_array(idx).int64_data().begin(),
+          output.insts(0).tensor_array(idx).int64_data().begin() + size);
     } else {
       VLOG(2) << "fetch var " << name << " type float";
-      model._float_value_map[name].resize(
-          output.insts(0).tensor_array(idx).float_data_size());
       int size = output.insts(0).tensor_array(idx).float_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._float_value_map[name][i] =
-            output.insts(0).tensor_array(idx).float_data(i);
-      }
+      model._float_value_map[name] = std::vector<float>(
+          output.insts(0).tensor_array(idx).float_data().begin(),
+          output.insts(0).tensor_array(idx).float_data().begin() + size);
     }
     idx += 1;
   }
......
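Both hunks above replace the resize-and-copy loop with a single range construction of the destination `std::vector` from the protobuf repeated field's iterators, which also avoids value-initializing the vector before overwriting every element. The standalone sketch below shows the two patterns side by side on a bare `google::protobuf::RepeatedField<int64_t>`; the field is filled by hand purely for illustration and does not come from the Serving protos.

```cpp
#include <google/protobuf/repeated_field.h>

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Stand-in for a repeated int64 field of a generated message
  // (e.g. what tensor.int64_data() returns); filled by hand here.
  google::protobuf::RepeatedField<int64_t> int64_data;
  for (int64_t v : {1, 2, 3, 4}) {
    int64_data.Add(v);
  }

  // Old pattern: resize the destination, then copy element by element.
  std::vector<int64_t> by_loop;
  by_loop.resize(int64_data.size());
  for (int i = 0; i < int64_data.size(); ++i) {
    by_loop[i] = int64_data.Get(i);
  }

  // New pattern: construct the vector directly from the field's iterators
  // (begin()/end() here is equivalent to begin() + size in the hunks,
  // since size is the full data size).
  std::vector<int64_t> by_range(int64_data.begin(), int64_data.end());

  std::cout << (by_loop == by_range ? "same contents" : "mismatch") << "\n";
  return 0;
}
```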
@@ -3,45 +3,45 @@
 ## CPU server
 ### Python 3
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.0-py3-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.1-py3-none-any.whl
 ```
 ### Python 2
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.0-py2-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.1-py2-none-any.whl
 ```
 ## GPU server
 ### Python 3
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.0-py3-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.1-py3-none-any.whl
 ```
 ### Python 2
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.0-py2-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.1-py2-none-any.whl
 ```
 ## Client
 ### Python 3.7
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.0-cp37-none-manylinux1_x86_64.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.1-cp37-none-any.whl
 ```
 ### Python 3.6
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.0-cp36-none-manylinux1_x86_64.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.1-cp36-none-any.whl
 ```
 ### Python 2.7
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.0-cp27-none-manylinux1_x86_64.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.1-cp27-none-any.whl
 ```
 ## App
 ### Python 3
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.0-py3-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.1-py3-none-any.whl
 ```
 ### Python 2
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.0-py2-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.1-py2-none-any.whl
 ```
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving App version string """
-serving_app_version = "0.1.0"
+serving_app_version = "0.1.1"
@@ -391,7 +391,13 @@ class MultiLangClient(object):
         self._parse_model_config(path)
     def connect(self, endpoint):
-        self.channel_ = grpc.insecure_channel(endpoint[0]) #TODO
+        # https://github.com/tensorflow/serving/issues/1382
+        options = [('grpc.max_receive_message_length', 512 * 1024 * 1024),
+                   ('grpc.max_send_message_length', 512 * 1024 * 1024),
+                   ('grpc.max_receive_message_length', 512 * 1024 * 1024)]
+        self.channel_ = grpc.insecure_channel(
+            endpoint[0], options=options) #TODO
         self.stub_ = multi_lang_general_model_service_pb2_grpc.MultiLangGeneralModelServiceStub(
             self.channel_)
......
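The `connect()` change above raises the gRPC message-size limits so that large prediction requests and responses are not rejected by the default 4 MB receive cap. A C++ client built against gRPC can apply equivalent limits through `grpc::ChannelArguments`; the sketch below is only a minimal illustration with a made-up endpoint, not the Serving C++ client's actual connection code.

```cpp
#include <grpcpp/grpcpp.h>

#include <memory>
#include <string>

int main() {
  // Hypothetical endpoint; substitute the real serving address.
  const std::string endpoint = "127.0.0.1:9393";

  // Lift the default 4 MB receive limit (and cap sends at the same size),
  // mirroring the 512 MB options added to the Python client above.
  grpc::ChannelArguments args;
  args.SetMaxReceiveMessageSize(512 * 1024 * 1024);
  args.SetMaxSendMessageSize(512 * 1024 * 1024);

  std::shared_ptr<grpc::Channel> channel = grpc::CreateCustomChannel(
      endpoint, grpc::InsecureChannelCredentials(), args);

  // A generated service stub would normally be constructed from `channel`.
  return 0;
}
```

Setting the limits once on the channel covers every stub created from it.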
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving Client version string """
-serving_client_version = "0.3.0"
-serving_server_version = "0.3.0"
-module_proto_version = "0.3.0"
+serving_client_version = "0.3.1"
+serving_server_version = "0.3.1"
+module_proto_version = "0.3.1"
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving Client version string """
-serving_client_version = "0.3.0"
-serving_server_version = "0.3.0"
-module_proto_version = "0.3.0"
+serving_client_version = "0.3.1"
+serving_server_version = "0.3.1"
+module_proto_version = "0.3.1"
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ Paddle Serving Client version string """
-serving_client_version = "0.3.0"
-serving_server_version = "0.3.0"
-module_proto_version = "0.3.0"
+serving_client_version = "0.3.1"
+serving_server_version = "0.3.1"
+module_proto_version = "0.3.1"
@@ -15,6 +15,6 @@
 from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
 import re
 with open("setup.cfg", "w") as f:
-    line = "[bdist_wheel]\npython-tag={0}{1}\nplat-name=manylinux1_x86_64".format(
-        get_abbr_impl(), get_impl_ver())
+    line = "[bdist_wheel]\npython-tag={0}{1}".format(get_abbr_impl(),
+                                                     get_impl_ver())
     f.write(line)