diff --git a/core/general-client/src/general_model.cpp b/core/general-client/src/general_model.cpp
index edac97eae3668124a7cd930a32225da7a2960d7e..9f709c71045577f7b043777a7ad1528a0e2ccc28 100644
--- a/core/general-client/src/general_model.cpp
+++ b/core/general-client/src/general_model.cpp
@@ -295,25 +295,23 @@ int PredictorClient::batch_predict(
   for (auto &name : fetch_name) {
     // int idx = _fetch_name_to_idx[name];
     if (_fetch_name_to_type[name] == 0) {
       VLOG(2) << "fetch var " << name << " type int64";
-      model._int64_value_map[name].resize(
-          output.insts(0).tensor_array(idx).int64_data_size());
       int size = output.insts(0).tensor_array(idx).int64_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._int64_value_map[name][i] =
-            output.insts(0).tensor_array(idx).int64_data(i);
-      }
+      model._int64_value_map[name] = std::vector<int64_t>(
+          output.insts(0).tensor_array(idx).int64_data().begin(),
+          output.insts(0).tensor_array(idx).int64_data().begin() + size);
     } else if (_fetch_name_to_type[name] == 1) {
       VLOG(2) << "fetch var " << name << " type float";
-      model._float_value_map[name].resize(
-          output.insts(0).tensor_array(idx).float_data_size());
       int size = output.insts(0).tensor_array(idx).float_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._float_value_map[name][i] =
-            output.insts(0).tensor_array(idx).float_data(i);
-      }
+      model._float_value_map[name] = std::vector<float>(
+          output.insts(0).tensor_array(idx).float_data().begin(),
+          output.insts(0).tensor_array(idx).float_data().begin() + size);
     } else if (_fetch_name_to_type[name] == 2) {
       VLOG(2) << "fetch var " << name << " type int32";
+      int size = output.insts(0).tensor_array(idx).int_data_size();
+      model._int32_value_map[name] = std::vector<int32_t>(
+          output.insts(0).tensor_array(idx).int_data().begin(),
+          output.insts(0).tensor_array(idx).int_data().begin() + size);
     }
     idx += 1;
@@ -601,31 +599,22 @@ int PredictorClient::numpy_predict(
     // int idx = _fetch_name_to_idx[name];
     if (_fetch_name_to_type[name] == 0) {
       VLOG(2) << "ferch var " << name << "type int64";
-      model._int64_value_map[name].resize(
-          output.insts(0).tensor_array(idx).int64_data_size());
       int size = output.insts(0).tensor_array(idx).int64_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._int64_value_map[name][i] =
-            output.insts(0).tensor_array(idx).int64_data(i);
-      }
+      model._int64_value_map[name] = std::vector<int64_t>(
+          output.insts(0).tensor_array(idx).int64_data().begin(),
+          output.insts(0).tensor_array(idx).int64_data().begin() + size);
     } else if (_fetch_name_to_type[name] == 1) {
       VLOG(2) << "fetch var " << name << "type float";
-      model._float_value_map[name].resize(
-          output.insts(0).tensor_array(idx).float_data_size());
       int size = output.insts(0).tensor_array(idx).float_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._float_value_map[name][i] =
-            output.insts(0).tensor_array(idx).float_data(i);
-      }
+      model._float_value_map[name] = std::vector<float>(
+          output.insts(0).tensor_array(idx).float_data().begin(),
+          output.insts(0).tensor_array(idx).float_data().begin() + size);
     } else if (_fetch_name_to_type[name] == 2) {
       VLOG(2) << "fetch var " << name << "type int32";
-      model._int32_value_map[name].resize(
-          output.insts(0).tensor_array(idx).int_data_size());
       int size = output.insts(0).tensor_array(idx).int_data_size();
-      for (int i = 0; i < size; ++i) {
-        model._int64_value_map[name][i] =
-            output.insts(0).tensor_array(idx).int_data(i);
-      }
+      model._int32_value_map[name] = std::vector<int32_t>(
+          output.insts(0).tensor_array(idx).int_data().begin(),
+          output.insts(0).tensor_array(idx).int_data().begin() + size);
     }
     idx += 1;
   }
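Note: `batch_predict` and `numpy_predict` above now bulk-copy each fetched tensor out of the protobuf response, and the type-2 branch routes data into `_int32_value_map` (the removed loop wrote int32 results into `_int64_value_map`). A minimal sketch of how these typed maps surface to callers on the Python side; the feed/fetch names `x` and `price` follow the fit_a_line example and are placeholders for your model:

```python
# Sketch only: the C++ value maps above back the fetch_map returned here.
# "x"/"price" are placeholder feed/fetch names; adjust to your model config.
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf.prototxt")
client.connect(["127.0.0.1:9393"])

# Values arrive with the dtype recorded in the client-side fetch type map.
fetch_map = client.predict(feed={"x": [0.0] * 13}, fetch=["price"])
print(fetch_map["price"])
```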
diff --git a/doc/COMPILE.md b/doc/COMPILE.md
index f4a6639bdb38fac97662084f7d927d24b6179717..734d32d8ff60aee69c4267cfa4b00e96514bf389 100644
--- a/doc/COMPILE.md
+++ b/doc/COMPILE.md
@@ -43,7 +43,7 @@ In the default centos7 image we provide, the Python path is `/usr/bin/python`. I
 ### Integrated CPU version paddle inference library

 ``` shell
-mkdir build && cd build
+mkdir server-build-cpu && cd server-build-cpu
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON ..
 make -j10
 ```
@@ -53,7 +53,7 @@ you can execute `make install` to put targets under directory `./output`, you ne
 ### Integrated GPU version paddle inference library

 ``` shell
-mkdir build && cd build
+mkdir server-build-gpu && cd server-build-gpu
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON -DWITH_GPU=ON ..
 make -j10
 ```
@@ -65,7 +65,7 @@ execute `make install` to put targets under directory `./output`
 ## Compile Client

 ``` shell
-mkdir build && cd build
+mkdir client-build && cd client-build
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DCLIENT=ON ..
 make -j10
 ```
@@ -75,7 +75,7 @@ execute `make install` to put targets under directory `./output`
 ## Compile the App

 ```bash
-mkdir build && cd build
+mkdir app-build && cd app-build
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DAPP=ON ..
 make
 ```
diff --git a/doc/COMPILE_CN.md b/doc/COMPILE_CN.md
index d8fd277131d7d169c1a47689e15556e5d10a0fdb..1d5d60bdff34a2561ca830faf8fe3404a4a9fd96 100644
--- a/doc/COMPILE_CN.md
+++ b/doc/COMPILE_CN.md
@@ -43,7 +43,7 @@ export PYTHONROOT=/usr/
 ### 集成CPU版本Paddle Inference Library

 ``` shell
-mkdir build && cd build
+mkdir server-build-cpu && cd server-build-cpu
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON ..
 make -j10
 ```
@@ -53,7 +53,7 @@ make -j10
 ### 集成GPU版本Paddle Inference Library

 ``` shell
-mkdir build && cd build
+mkdir server-build-gpu && cd server-build-gpu
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON -DWITH_GPU=ON ..
 make -j10
 ```
@@ -65,7 +65,7 @@ make -j10
 ## 编译Client部分

 ``` shell
-mkdir build && cd build
+mkdir client-build && cd client-build
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DCLIENT=ON ..
 make -j10
 ```
@@ -75,7 +75,7 @@ make -j10
 ## 编译App部分

 ```bash
-mkdir build && cd build
+mkdir app-build && cd app-build
 cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DCMAKE_INSTALL_PREFIX=./output -DAPP=ON ..
 make
 ```
diff --git a/doc/LATEST_PACKAGES.md b/doc/LATEST_PACKAGES.md
index 8756743a5c23778ea2d4753a693a272d5f6eb992..8dc196c0b2d91262c284edcbf5d724f11d200713 100644
--- a/doc/LATEST_PACKAGES.md
+++ b/doc/LATEST_PACKAGES.md
@@ -3,45 +3,45 @@
 ## CPU server
 ### Python 3
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.0-py3-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.1-py3-none-any.whl
 ```
 ### Python 2
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.0-py2-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.3.1-py2-none-any.whl
 ```

 ## GPU server
 ### Python 3
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.0-py3-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.1-py3-none-any.whl
 ```
 ### Python 2
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.0-py2-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.3.1-py2-none-any.whl
 ```

 ## Client
 ### Python 3.7
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.0-cp37-none-manylinux1_x86_64.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.1-cp37-none-any.whl
 ```
 ### Python 3.6
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.0-cp36-none-manylinux1_x86_64.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.1-cp36-none-any.whl
 ```
 ### Python 2.7
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.0-cp27-none-manylinux1_x86_64.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.3.1-cp27-none-any.whl
 ```

 ## App
 ### Python 3
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.0-py3-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.1-py3-none-any.whl
 ```
 ### Python 2
 ```
-https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.0-py2-none-any.whl
+https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.1.1-py2-none-any.whl
 ```
diff --git a/python/examples/yolov4/README.md b/python/examples/yolov4/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..08e16026d79ef7e93df732359f2c17609d4a2d0d
--- /dev/null
+++ b/python/examples/yolov4/README.md
@@ -0,0 +1,23 @@
+# Yolov4 Detection Service
+
+([简体中文](README_CN.md)|English)
+
+## Get Model
+
+```
+python -m paddle_serving_app.package --get_model yolov4
+tar -xzvf yolov4.tar.gz
+```
+
+## Start RPC Service
+
+```
+python -m paddle_serving_server_gpu.serve --model yolov4_model --port 9393 --gpu_ids 0
+```
+
+## Prediction
+
+```
+python test_client.py 000000570688.jpg
+```
+After the prediction is completed, a JSON file holding the prediction results and an image with the detection boxes drawn on it will be generated in the `./output` folder.
diff --git a/python/examples/yolov4/README_CN.md b/python/examples/yolov4/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..a4eed96b08619d4602cbd012a676a9adb6e08a63
--- /dev/null
+++ b/python/examples/yolov4/README_CN.md
@@ -0,0 +1,24 @@
+# Yolov4 检测服务
+
+(简体中文|[English](README.md))
+
+## 获取模型
+
+```
+python -m paddle_serving_app.package --get_model yolov4
+tar -xzvf yolov4.tar.gz
+```
+
+## 启动RPC服务
+
+```
+python -m paddle_serving_server_gpu.serve --model yolov4_model --port 9393 --gpu_ids 0
+```
+
+## 预测
+
+```
+python test_client.py 000000570688.jpg
+```
+
+预测完成会在`./output`文件夹下生成保存预测结果的json文件以及标出检测结果框的图片。
diff --git a/python/examples/yolov4/test_client.py b/python/examples/yolov4/test_client.py
index 15053de4d003314b91391b7f12df008710347a14..92dcd06552ca1fdd3f2d54060e9de501f052e349 100644
--- a/python/examples/yolov4/test_client.py
+++ b/python/examples/yolov4/test_client.py
@@ -23,11 +23,11 @@ preprocess = Sequential([
     (2, 0, 1))
 ])

-postprocess = RCNNPostprocess("label_list.txt", "output")
+postprocess = RCNNPostprocess("label_list.txt", "output", [608, 608])

 client = Client()
 client.load_client_config("yolov4_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9300'])
+client.connect(['127.0.0.1:9393'])

 im = preprocess(sys.argv[1])
 print(im.shape)
diff --git a/python/paddle_serving_app/models/model_list.py b/python/paddle_serving_app/models/model_list.py
index 0c26a59f6f0537b9c910f21062938d4720d4f9f4..79b3f91bd6584d17ddbc4124584cf40bd586b965 100644
--- a/python/paddle_serving_app/models/model_list.py
+++ b/python/paddle_serving_app/models/model_list.py
@@ -24,7 +24,7 @@ class ServingModels(object):
             "SentimentAnalysis"] = ["senta_bilstm", "senta_bow", "senta_cnn"]
         self.model_dict["SemanticRepresentation"] = ["ernie"]
         self.model_dict["ChineseWordSegmentation"] = ["lac"]
-        self.model_dict["ObjectDetection"] = ["faster_rcnn"]
+        self.model_dict["ObjectDetection"] = ["faster_rcnn", "yolov4"]
         self.model_dict["ImageSegmentation"] = [
             "unet", "deeplabv3", "deeplabv3+cityscapes"
         ]
diff --git a/python/paddle_serving_app/reader/image_reader.py b/python/paddle_serving_app/reader/image_reader.py
index dc029bf0409179f1d392ce05d007565cd3007085..a44ca5de84da2bafce9b4cea37fb88095debabc6 100644
--- a/python/paddle_serving_app/reader/image_reader.py
+++ b/python/paddle_serving_app/reader/image_reader.py
@@ -280,10 +280,11 @@ class SegPostprocess(object):


 class RCNNPostprocess(object):
-    def __init__(self, label_file, output_dir):
+    def __init__(self, label_file, output_dir, resize_shape=None):
         self.output_dir = output_dir
         self.label_file = label_file
         self.label_list = []
+        self.resize_shape = resize_shape
         with open(label_file) as fin:
             for line in fin:
                 self.label_list.append(line.strip())
@@ -378,6 +379,13 @@ class RCNNPostprocess(object):
         xmax = xmin + w
         ymax = ymin + h

+        img_w, img_h = image.size
+        if self.resize_shape is not None:
+            xmin = xmin * img_w / self.resize_shape[0]
+            xmax = xmax * img_w / self.resize_shape[0]
+            ymin = ymin * img_h / self.resize_shape[1]
+            ymax = ymax * img_h / self.resize_shape[1]
+
         color = tuple(color_list[catid])

         # draw bbox
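The new `resize_shape` argument exists because YOLOv4 predicts boxes in the 608x608 network-input space, while drawing happens on the original image. A worked example of the rescaling added above; the 1280x720 source size and the box coordinates are hypothetical:

```python
# Worked example of the new rescaling, outside the class for clarity.
resize_shape = [608, 608]  # network input size passed to RCNNPostprocess
img_w, img_h = 1280, 720   # original image size (PIL image.size order)
xmin, ymin, xmax, ymax = 304.0, 152.0, 456.0, 304.0  # box in 608x608 space

xmin = xmin * img_w / resize_shape[0]  # 304 * 1280 / 608 = 640.0
xmax = xmax * img_w / resize_shape[0]  # 456 * 1280 / 608 = 960.0
ymin = ymin * img_h / resize_shape[1]  # 152 *  720 / 608 = 180.0
ymax = ymax * img_h / resize_shape[1]  # 304 *  720 / 608 = 360.0
print((xmin, ymin, xmax, ymax))        # box in original-image space
```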
""" Paddle Serving App version string """ -serving_app_version = "0.1.0" +serving_app_version = "0.1.1" diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py index 5f63302b0a26e3575bf82390875bb42bb4fc08a9..37f52c48b4c168d93f877a4a7cd4f1bd9afc8b1d 100644 --- a/python/paddle_serving_client/__init__.py +++ b/python/paddle_serving_client/__init__.py @@ -404,7 +404,13 @@ class MultiLangClient(object): self._parse_model_config(path) def connect(self, endpoint): - self.channel_ = grpc.insecure_channel(endpoint[0]) #TODO + # https://github.com/tensorflow/serving/issues/1382 + options = [('grpc.max_receive_message_length', 512 * 1024 * 1024), + ('grpc.max_send_message_length', 512 * 1024 * 1024), + ('grpc.max_receive_message_length', 512 * 1024 * 1024)] + + self.channel_ = grpc.insecure_channel( + endpoint[0], options=options) #TODO self.stub_ = multi_lang_general_model_service_pb2_grpc.MultiLangGeneralModelServiceStub( self.channel_) diff --git a/python/paddle_serving_client/version.py b/python/paddle_serving_client/version.py index 5a1f35c598f044e80cff12ce661ff80a61647543..4e024f00030d1fcf9a5eec7a2d8aec6d7c029251 100644 --- a/python/paddle_serving_client/version.py +++ b/python/paddle_serving_client/version.py @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. """ Paddle Serving Client version string """ -serving_client_version = "0.3.0" -serving_server_version = "0.3.0" -module_proto_version = "0.3.0" +serving_client_version = "0.3.1" +serving_server_version = "0.3.1" +module_proto_version = "0.3.1" diff --git a/python/paddle_serving_server/version.py b/python/paddle_serving_server/version.py index 5a1f35c598f044e80cff12ce661ff80a61647543..4e024f00030d1fcf9a5eec7a2d8aec6d7c029251 100644 --- a/python/paddle_serving_server/version.py +++ b/python/paddle_serving_server/version.py @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. """ Paddle Serving Client version string """ -serving_client_version = "0.3.0" -serving_server_version = "0.3.0" -module_proto_version = "0.3.0" +serving_client_version = "0.3.1" +serving_server_version = "0.3.1" +module_proto_version = "0.3.1" diff --git a/python/paddle_serving_server_gpu/version.py b/python/paddle_serving_server_gpu/version.py index 5a1f35c598f044e80cff12ce661ff80a61647543..4e024f00030d1fcf9a5eec7a2d8aec6d7c029251 100644 --- a/python/paddle_serving_server_gpu/version.py +++ b/python/paddle_serving_server_gpu/version.py @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. """ Paddle Serving Client version string """ -serving_client_version = "0.3.0" -serving_server_version = "0.3.0" -module_proto_version = "0.3.0" +serving_client_version = "0.3.1" +serving_server_version = "0.3.1" +module_proto_version = "0.3.1" diff --git a/tools/python_tag.py b/tools/python_tag.py index 7c0fb5aa9928bb83c51df698b2f66df17793feb1..9ad7e07d6d1996dbd48e32f9a8d13d09df45c818 100644 --- a/tools/python_tag.py +++ b/tools/python_tag.py @@ -15,6 +15,6 @@ from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag import re with open("setup.cfg", "w") as f: - line = "[bdist_wheel]\npython-tag={0}{1}\nplat-name=manylinux1_x86_64".format( - get_abbr_impl(), get_impl_ver()) + line = "[bdist_wheel]\npython-tag={0}{1}".format(get_abbr_impl(), + get_impl_ver()) f.write(line)
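For reference, the channel options that `MultiLangClient.connect` now sets raise gRPC's default 4 MB message cap to 512 MB, so large tensors (e.g. image batches) can pass through. A standalone sketch of the same configuration with plain grpcio; the endpoint is a placeholder:

```python
# Minimal sketch of the channel options added in MultiLangClient.connect.
import grpc

options = [("grpc.max_send_message_length", 512 * 1024 * 1024),
           ("grpc.max_receive_message_length", 512 * 1024 * 1024)]
channel = grpc.insecure_channel("127.0.0.1:9393", options=options)
```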