diff --git a/README_CN.md b/README_CN.md
index 547a50e7a430f3afb3a69eae211ed87cb248a268..298b8156e34d6c8834babcb68ae8f7a466b156af 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -1,9 +1,12 @@
+(简体中文|[English](./README.md))
+
+
diff --git a/python/examples/bert/bert_web_service.py b/python/examples/bert/bert_web_service.py
index d72150878c51d4f95bbc5d2263ad00fb1ed2c387..b1898b2cc0ee690dd075958944a56fed27dce29a 100644
--- a/python/examples/bert/bert_web_service.py
+++ b/python/examples/bert/bert_web_service.py
@@ -21,7 +21,10 @@ import os
class BertService(WebService):
def load(self):
- self.reader = ChineseBertReader(vocab_file="vocab.txt", max_seq_len=128)
+ self.reader = ChineseBertReader({
+ "vocab_file": "vocab.txt",
+ "max_seq_len": 128
+ })
def preprocess(self, feed=[], fetch=[]):
feed_res = [
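For reference, a minimal sketch of how the dict-style constructor introduced above could be exercised on its own. The import path and the `process()` call are assumptions based on typical paddle_serving_app usage and are not part of this patch:

```python
# Hypothetical standalone use of the dict-based ChineseBertReader config.
# Import path and process() signature are assumptions; check the installed
# paddle_serving_app version.
from paddle_serving_app import ChineseBertReader

reader = ChineseBertReader({"vocab_file": "vocab.txt", "max_seq_len": 128})
feed_dict = reader.process("这是一个中文测试句子")  # tokenize a single sentence
print(feed_dict.keys())  # e.g. input ids / position ids / segment ids / input mask
```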
diff --git a/python/examples/deeplabv3/README.md b/python/examples/deeplabv3/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3eb5c84e2d5be7c7a1448940c758e60d77bd56e6
--- /dev/null
+++ b/python/examples/deeplabv3/README.md
@@ -0,0 +1,22 @@
+# Image Segmentation
+
+## Get Model
+
+```
+python -m paddle_serving_app.package --get_model deeplabv3
+tar -xzvf deeplabv3.tar.gz
+```
+
+## RPC Service
+
+### Start Service
+
+```
+python -m paddle_serving_server_gpu.serve --model deeplabv3_server --gpu_ids 0 --port 9494
+```
+
+### Client Prediction
+
+```
+python deeplabv3_client.py
+```
diff --git a/python/examples/deeplabv3/README_CN.md b/python/examples/deeplabv3/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..a25bb2d059df49568056664493c1c96b999005b2
--- /dev/null
+++ b/python/examples/deeplabv3/README_CN.md
@@ -0,0 +1,22 @@
+# 图像分割
+
+## 获取模型
+
+```
+python -m paddle_serving_app.package --get_model deeplabv3
+tar -xzvf deeplabv3.tar.gz
+```
+
+## RPC 服务
+
+### 启动服务端
+
+```
+python -m paddle_serving_server_gpu.serve --model deeplabv3_server --gpu_ids 0 --port 9494
+```
+
+### 客户端预测
+
+```
+python deeplabv3_client.py
+```
diff --git a/python/examples/deeplabv3/deeplabv3_client.py b/python/examples/deeplabv3/deeplabv3_client.py
index 75ea6b0a01868af30c94fb0686159571c2c1c966..77e25d5f5a24d0aa1dad8939c1e7845eaf5e4122 100644
--- a/python/examples/deeplabv3/deeplabv3_client.py
+++ b/python/examples/deeplabv3/deeplabv3_client.py
@@ -18,7 +18,7 @@ import sys
import cv2
client = Client()
-client.load_client_config("seg_client/serving_client_conf.prototxt")
+client.load_client_config("deeplabv3_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9494"])
preprocess = Sequential(
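For context, a hedged sketch of the full client flow around the corrected config path. The preprocessing op names, the 512x512 input size, the image filename, and the feed/fetch keys are assumptions and may differ from the actual deeplabv3_client.py:

```python
# Sketch only: op names, input size, filename and fetch variable are placeholders.
from paddle_serving_client import Client
from paddle_serving_app.reader import Sequential, File2Image, Resize, Div, Transpose

client = Client()
client.load_client_config("deeplabv3_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9494"])

preprocess = Sequential([
    File2Image(),          # read the file and decode it with OpenCV
    Resize((512, 512)),    # resize to the model's assumed input size
    Div(255.0),            # scale pixel values to [0, 1]
    Transpose((2, 0, 1)),  # HWC -> CHW
])
img = preprocess("input.jpg")  # placeholder image path
fetch_map = client.predict(feed={"image": img}, fetch=["output"])
```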
diff --git a/python/examples/faster_rcnn_model/README.md b/python/examples/faster_rcnn_model/README.md
index c1d3d40b054fb362bd20c59a9a7fc4d09e89f31b..e31f734e2b8f04ee4cd35258f9da81672b2caf88 100644
--- a/python/examples/faster_rcnn_model/README.md
+++ b/python/examples/faster_rcnn_model/README.md
@@ -12,8 +12,8 @@ If you want to have more detection models, please refer to [Paddle Detection Mod
### Start the service
```
tar xf faster_rcnn_model.tar.gz
-mv faster_rcnn_model/pddet *.
-GLOG_v=2 python -m paddle_serving_server_gpu.serve --model pddet_serving_model --port 9494 --gpu_id 0
+mv faster_rcnn_model/pddet* .
+GLOG_v=2 python -m paddle_serving_server_gpu.serve --model pddet_serving_model --port 9494 --gpu_ids 0
```
### Perform prediction
diff --git a/python/examples/faster_rcnn_model/README_CN.md b/python/examples/faster_rcnn_model/README_CN.md
index a2c3618f071a3650d50c791595bc04ba0c1d378a..3ddccf9e63043e797c9e261c1f26ebe774adb81c 100644
--- a/python/examples/faster_rcnn_model/README_CN.md
+++ b/python/examples/faster_rcnn_model/README_CN.md
@@ -13,7 +13,7 @@ wget https://paddle-serving.bj.bcebos.com/pddet_demo/infer_cfg.yml
```
tar xf faster_rcnn_model.tar.gz
mv faster_rcnn_model/pddet* ./
-GLOG_v=2 python -m paddle_serving_server_gpu.serve --model pddet_serving_model --port 9494 --gpu_id 0
+GLOG_v=2 python -m paddle_serving_server_gpu.serve --model pddet_serving_model --port 9494 --gpu_ids 0
```
### 执行预测
diff --git a/python/examples/imagenet/README_CN.md b/python/examples/imagenet/README_CN.md
index 77ade579ba17ad8247b2f118242642a1d3c79927..081cff528c393ecb5534ec679d6e63739f720f20 100644
--- a/python/examples/imagenet/README_CN.md
+++ b/python/examples/imagenet/README_CN.md
@@ -19,10 +19,10 @@ pip install paddle_serving_app
启动server端
```
-python image_classification_service.py ResNet50_vd_model cpu 9696 #cpu预测服务
+python resnet50_web_service.py ResNet50_vd_model cpu 9696 #cpu预测服务
```
```
-python image_classification_service.py ResNet50_vd_model gpu 9696 #gpu预测服务
+python resnet50_web_service.py ResNet50_vd_model gpu 9696 #gpu预测服务
```
diff --git a/python/examples/mobilenet/README.md b/python/examples/mobilenet/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..496ebdbe2e244af8091cb28cdcdecf7627088ba3
--- /dev/null
+++ b/python/examples/mobilenet/README.md
@@ -0,0 +1,22 @@
+# Image Classification
+
+## Get Model
+
+```
+python -m paddle_serving_app.package --get_model mobilenet_v2_imagenet
+tar -xzvf mobilenet_v2_imagenet.tar.gz
+```
+
+## RPC Service
+
+### Start Service
+
+```
+python -m paddle_serving_server_gpu.serve --model mobilenet_v2_imagenet_model --gpu_ids 0 --port 9393
+```
+
+### Client Prediction
+
+```
+python mobilenet_tutorial.py
+```
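A rough sketch of the kind of client code `mobilenet_tutorial.py` runs; the op names, the 224x224 input size, the filename, and the feed/fetch keys are assumptions rather than contents of this patch:

```python
# Sketch only; see mobilenet_tutorial.py for the actual preprocessing chain.
from paddle_serving_client import Client
from paddle_serving_app.reader import Sequential, File2Image, Resize, Div, Transpose

client = Client()
client.load_client_config("mobilenet_v2_imagenet_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9393"])

seq = Sequential([File2Image(), Resize((224, 224)), Div(255.0), Transpose((2, 0, 1))])
img = seq("daisy.jpg")  # placeholder image path
fetch_map = client.predict(feed={"image": img}, fetch=["score"])
print(fetch_map["score"].argmax())  # top-1 class index, assuming a numpy score vector
```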
diff --git a/python/examples/mobilenet/README_CN.md b/python/examples/mobilenet/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..7c721b4bd161fbf7c400f1a73ddb7be69c449871
--- /dev/null
+++ b/python/examples/mobilenet/README_CN.md
@@ -0,0 +1,22 @@
+# 图像分类
+
+## 获取模型
+
+```
+python -m paddle_serving_app.package --get_model mobilenet_v2_imagenet
+tar -xzvf mobilenet_v2_imagenet.tar.gz
+```
+
+## RPC 服务
+
+### 启动服务端
+
+```
+python -m paddle_serving_server_gpu.serve --model mobilenet_v2_imagenet_model --gpu_ids 0 --port 9393
+```
+
+### 客户端预测
+
+```
+python mobilenet_tutorial.py
+```
diff --git a/python/examples/resnet_v2_50/README.md b/python/examples/resnet_v2_50/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fd86074c73177a06cd59ebb3bd0c28c7f22e95f2
--- /dev/null
+++ b/python/examples/resnet_v2_50/README.md
@@ -0,0 +1,22 @@
+# Image Classification
+
+## Get Model
+
+```
+python -m paddle_serving_app.package --get_model resnet_v2_50_imagenet
+tar -xzvf resnet_v2_50_imagenet.tar.gz
+```
+
+## RPC Service
+
+### Start Service
+
+```
+python -m paddle_serving_server_gpu.serve --model resnet_v2_50_imagenet_model --gpu_ids 0 --port 9393
+```
+
+### Client Prediction
+
+```
+python resnet50_v2_tutorial.py
+```
diff --git a/python/examples/resnet_v2_50/README_CN.md b/python/examples/resnet_v2_50/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..bda2916eb43d55d718af1095c21869e00fb27093
--- /dev/null
+++ b/python/examples/resnet_v2_50/README_CN.md
@@ -0,0 +1,22 @@
+# 图像分类
+
+## 获取模型
+
+```
+python -m paddle_serving_app.package --get_model resnet_v2_50_imagenet
+tar -xzvf resnet_v2_50_imagenet.tar.gz
+```
+
+## RPC 服务
+
+### 启动服务端
+
+```
+python -m paddle_serving_server_gpu.serve --model resnet_v2_50_imagenet_model --gpu_ids 0 --port 9393
+```
+
+### 客户端预测
+
+```
+python resnet50_v2_tutorial.py
+```
diff --git a/python/examples/unet_for_image_seg/README.md b/python/examples/unet_for_image_seg/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7936ad43cbc3b53719babdf6f91ea46e74a827da
--- /dev/null
+++ b/python/examples/unet_for_image_seg/README.md
@@ -0,0 +1,22 @@
+# Image Segmentation
+
+## Get Model
+
+```
+python -m paddle_serving_app.package --get_model unet
+tar -xzvf unet.tar.gz
+```
+
+## RPC Service
+
+### Start Service
+
+```
+python -m paddle_serving_server_gpu.serve --model unet_model --gpu_ids 0 --port 9494
+```
+
+### Client Prediction
+
+```
+python seg_client.py
+```
diff --git a/python/examples/unet_for_image_seg/README_CN.md b/python/examples/unet_for_image_seg/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..f4b91aaff5697ff8ea3901e0a8084152f6007ff4
--- /dev/null
+++ b/python/examples/unet_for_image_seg/README_CN.md
@@ -0,0 +1,22 @@
+# 图像分割
+
+## 获取模型
+
+```
+python -m paddle_serving_app.package --get_model unet
+tar -xzvf unet.tar.gz
+```
+
+## RPC 服务
+
+### 启动服务端
+
+```
+python -m paddle_serving_server_gpu.serve --model unet_model --gpu_ids 0 --port 9494
+```
+
+### 客户端预测
+
+```
+python seg_client.py
+```
diff --git a/python/paddle_serving_app/README.md b/python/paddle_serving_app/README.md
index a0fd35b7f02ce165f878238a757613c62d2fea26..07fff931e250bad59ef2bedfe1e054f4682f6c9f 100644
--- a/python/paddle_serving_app/README.md
+++ b/python/paddle_serving_app/README.md
@@ -12,7 +12,7 @@ pip install paddle_serving_app
## Get model list
```shell
-python -m paddle_serving_app.package --model_list
+python -m paddle_serving_app.package --list_model
```
## Download pre-training model
diff --git a/python/paddle_serving_app/README_CN.md b/python/paddle_serving_app/README_CN.md
index 2624c238e2dc212f1d10a251ee742891cae6a08c..f6fda8beaf75264d8ae5d2cbb939fdf226c342ab 100644
--- a/python/paddle_serving_app/README_CN.md
+++ b/python/paddle_serving_app/README_CN.md
@@ -11,7 +11,7 @@ pip install paddle_serving_app
## 获取模型列表
```shell
-python -m paddle_serving_app.package --model_list
+python -m paddle_serving_app.package --list_model
```
## 下载预训练模型
diff --git a/python/paddle_serving_app/reader/image_reader.py b/python/paddle_serving_app/reader/image_reader.py
index 7988bf447b5a0a075171d93d22dd1933aa8532b8..7f4a795513447d74e7f02d7741344ccae81c7c9d 100644
--- a/python/paddle_serving_app/reader/image_reader.py
+++ b/python/paddle_serving_app/reader/image_reader.py
@@ -296,7 +296,10 @@ class File2Image(object):
pass
def __call__(self, img_path):
- fin = open(img_path)
+ if py_version == 2:
+ fin = open(img_path)
+ else:
+ fin = open(img_path, "rb")
sample = fin.read()
data = np.fromstring(sample, np.uint8)
img = cv2.imdecode(data, cv2.IMREAD_COLOR)
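The binary-mode fix matters because `cv2.imdecode` needs raw bytes under Python 3; a small standalone sketch of the same pattern, with a placeholder file name:

```python
# Illustration of the py2/py3-safe binary read that File2Image now performs.
import sys
import cv2
import numpy as np

py_version = sys.version_info[0]
img_path = "example.jpg"  # placeholder path
fin = open(img_path) if py_version == 2 else open(img_path, "rb")
sample = fin.read()
fin.close()
data = np.frombuffer(sample, np.uint8)  # np.fromstring also works but is deprecated
img = cv2.imdecode(data, cv2.IMREAD_COLOR)
print(img.shape)  # (H, W, 3) BGR array
```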
diff --git a/python/paddle_serving_client/__init__.py b/python/paddle_serving_client/__init__.py
index e3302c14239c8bfc37a6bafb39b112cfed5230fd..f2922f577b21d8acc3f8ec629f2473b5339ee725 100644
--- a/python/paddle_serving_client/__init__.py
+++ b/python/paddle_serving_client/__init__.py
@@ -61,13 +61,18 @@ class SDKConfig(object):
self.tag_list = []
self.cluster_list = []
self.variant_weight_list = []
+ self.rpc_timeout_ms = 20000
+ self.load_balance_strategy = "la"
def add_server_variant(self, tag, cluster, variant_weight):
self.tag_list.append(tag)
self.cluster_list.append(cluster)
self.variant_weight_list.append(variant_weight)
- def gen_desc(self):
+ def set_load_balance_strategy(self, strategy):
+ self.load_balance_strategy = strategy
+
+ def gen_desc(self, rpc_timeout_ms):
predictor_desc = sdk.Predictor()
predictor_desc.name = "general_model"
predictor_desc.service_name = \
@@ -86,7 +91,7 @@ class SDKConfig(object):
self.sdk_desc.predictors.extend([predictor_desc])
self.sdk_desc.default_variant_conf.tag = "default"
self.sdk_desc.default_variant_conf.connection_conf.connect_timeout_ms = 2000
- self.sdk_desc.default_variant_conf.connection_conf.rpc_timeout_ms = 20000
+ self.sdk_desc.default_variant_conf.connection_conf.rpc_timeout_ms = rpc_timeout_ms
self.sdk_desc.default_variant_conf.connection_conf.connect_retry_count = 2
self.sdk_desc.default_variant_conf.connection_conf.max_connection_per_host = 100
self.sdk_desc.default_variant_conf.connection_conf.hedge_request_timeout_ms = -1
@@ -119,6 +124,7 @@ class Client(object):
self.profile_ = _Profiler()
self.all_numpy_input = True
self.has_numpy_input = False
+ self.rpc_timeout_ms = 20000
def load_client_config(self, path):
from .serving_client import PredictorClient
@@ -171,6 +177,12 @@ class Client(object):
self.predictor_sdk_.add_server_variant(tag, cluster,
str(variant_weight))
+ def set_rpc_timeout_ms(self, rpc_timeout):
+ if not isinstance(rpc_timeout, int):
+ raise ValueError("rpc_timeout must be an integer.")
+ else:
+ self.rpc_timeout_ms = rpc_timeout
+
def connect(self, endpoints=None):
# check whether current endpoint is available
# init from client config
@@ -188,7 +200,7 @@ class Client(object):
print(
"parameter endpoints({}) will not take effect, because you use the add_variant function.".
format(endpoints))
- sdk_desc = self.predictor_sdk_.gen_desc()
+ sdk_desc = self.predictor_sdk_.gen_desc(self.rpc_timeout_ms)
self.client_handle_.create_predictor_by_desc(sdk_desc.SerializeToString(
))
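A short usage sketch for the new timeout knob; the config path and endpoint are placeholders. Note that `set_rpc_timeout_ms` only takes effect if it is called before `connect()`, since that is where the value is baked into the SDK descriptor via `gen_desc(self.rpc_timeout_ms)`:

```python
# Sketch: configure the client-side RPC timeout before connecting.
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf/serving_client_conf.prototxt")  # placeholder
client.set_rpc_timeout_ms(60000)    # raise the 20s default to 60s
client.connect(["127.0.0.1:9393"])  # placeholder endpoint; gen_desc() now gets the timeout
```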
diff --git a/python/paddle_serving_server/__init__.py b/python/paddle_serving_server/__init__.py
index 3cb96a8f04922362fdb4b4c497f7679355e3879f..7356de2c2feac126272cf9a771a03146a87ef541 100644
--- a/python/paddle_serving_server/__init__.py
+++ b/python/paddle_serving_server/__init__.py
@@ -23,6 +23,7 @@ import paddle_serving_server as paddle_serving_server
from .version import serving_server_version
from contextlib import closing
import collections
+import fcntl
class OpMaker(object):
@@ -322,6 +323,10 @@ class Server(object):
bin_url = "https://paddle-serving.bj.bcebos.com/bin/" + tar_name
self.server_path = os.path.join(self.module_path, floder_name)
+ #acquire lock
+ version_file = open("{}/version.py".format(self.module_path), "r")
+ fcntl.flock(version_file, fcntl.LOCK_EX)
+
if not os.path.exists(self.server_path):
print('Frist time run, downloading PaddleServing components ...')
r = os.system('wget ' + bin_url + ' --no-check-certificate')
@@ -345,6 +350,8 @@ class Server(object):
foemat(self.module_path))
finally:
os.remove(tar_name)
+ #release lock
+ version_file.close()
os.chdir(self.cur_path)
self.bin_path = self.server_path + "/serving"
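The lock added above serializes the one-time binary download when several server processes start concurrently on the same machine. A generic sketch of the same `fcntl.flock` pattern, with placeholder paths:

```python
# Generic illustration of the exclusive-flock-around-download pattern used above.
import fcntl
import os

lock_path = "/tmp/paddle_serving_download.lock"  # placeholder lock file
with open(lock_path, "w") as lock_file:
    fcntl.flock(lock_file, fcntl.LOCK_EX)        # blocks until no other process holds it
    if not os.path.exists("/tmp/serving_bin"):   # placeholder target directory
        pass  # only the first process reaches here and performs the download
    # closing the file at the end of the with-block releases the lock
```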
diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py
index 7acc926c7f7fc465da20a7609bc767a5289d2e61..d4631141f8173b4ae0cb41d42c615566ac81ae7e 100644
--- a/python/paddle_serving_server_gpu/__init__.py
+++ b/python/paddle_serving_server_gpu/__init__.py
@@ -25,6 +25,7 @@ from .version import serving_server_version
from contextlib import closing
import argparse
import collections
+import fcntl
def serve_args():
@@ -347,6 +348,11 @@ class Server(object):
download_flag = "{}/{}.is_download".format(self.module_path,
folder_name)
+
+ #acquire lock
+ version_file = open("{}/version.py".format(self.module_path), "r")
+ fcntl.flock(version_file, fcntl.LOCK_EX)
+
if os.path.exists(download_flag):
os.chdir(self.cur_path)
self.bin_path = self.server_path + "/serving"
@@ -377,6 +383,8 @@ class Server(object):
format(self.module_path))
finally:
os.remove(tar_name)
+ #release lock
+ version_file.close()
os.chdir(self.cur_path)
self.bin_path = self.server_path + "/serving"