Unverified commit 35cb91e9 authored by MRXLT, committed by GitHub

Merge pull request #10 from MRXLT/master

change to using local model file
@@ -2,8 +2,8 @@
 Normally, models saved by the PaddlePaddle large-scale classification library during training contain only the model parameters,
 not the inference model structure. To deploy the PLSC inference library, the pre-trained model needs to be exported as an inference model.
-The inference model contains both the parameters and the model structure required for inference, and is used for subsequent prediction tasks (
-see [Using the C++ inference library](./serving.md)
+The inference model contains both the parameters and the model structure required for inference, and is used for subsequent prediction tasks (see [Using the C++ inference library](./serving.md)).
 The pre-trained model can be exported as an inference model with the following code:
...
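For reference, a minimal export script along the lines the documentation describes might look like the sketch below. The entry points used here (`Entry`, `set_checkpoint_dir`, `set_model_save_dir`, `convert_for_prediction`) are assumptions based on PLSC's training examples and are not shown in this diff; the directory names are placeholders.

```python
# Sketch only: assumes the PLSC `Entry` API exposes convert_for_prediction().
from plsc import Entry

if __name__ == "__main__":
    ins = Entry()
    ins.set_checkpoint_dir('./pretrained_model')   # directory holding the trained parameters
    ins.set_model_save_dir('./inference_model')    # where the exported inference model is written
    ins.convert_for_prediction()                   # saves parameters plus model structure
```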
@@ -4,15 +4,16 @@
 Server side
-Requires a Python 3 environment; download the whl package
-https://paddle-serving.bj.bcebos.com/paddle-gpu-serving/wheel/plsc_serving-0.1.4-py3-none-any.whl
-pip3 install plsc_serving-0.1.4-py3-none-any.whl
+Requires a Python 3 environment to run
+```bash
+pip3 install plsc-serving
+```
 Client side
-ujson needs to be installed: pip install ujson
+ujson needs to be installed
+```bash
+pip install ujson
+```
 Copy the [client script](./serving/client/face_service/face_service.py) to your working directory
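Once `face_service.py` has been copied, client-side usage presumably looks roughly like the sketch below. The class name `FaceService` and the `connect`/`encode` methods are assumptions about that script, and the image path and server address are placeholders.

```python
# Sketch only: assumes face_service.py exposes FaceService with connect()/encode().
from face_service import FaceService

with open('./example.jpg', 'rb') as f:
    image = f.read()

client = FaceService()
client.connect('127.0.0.1:8010')   # address/port where PLSCServer was started
result = client.encode([image])    # request face embeddings for a batch of images
```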
@@ -25,7 +26,8 @@ Server side
 ```python
 from plsc_serving.run import PLSCServer
 fs = PLSCServer()
-fs.with_model(model_name = 'face_resnet50')
+# Set the path of the model file to use; str type, must be an absolute path
+fs.with_model(model_path = '/XXX/XXX')
 # Run a single process; gpu_index selects the GPU to use (int, default 0), port selects the port (int, default 8866)
 fs.run(gpu_index = 0, port = 8010)
 ```
...
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-__version__ = '0.1.4'
+__version__ = '0.1.6'
@@ -63,6 +63,9 @@ class PLSCServer():
     def modify_conf(self, gpu_index=0):
         os.chdir(self.get_path())
         engine_name = 'name: "face_resnet50"'
+        if self.model_path_str == '':
+            print('Please set model path using with_model.')
+            return -1
         if not self.with_gpu_flag:
             with open('./conf/model_toolkit.prototxt', 'r') as f:
                 conf_str = f.read()
@@ -86,6 +89,8 @@ class PLSCServer():
                              'enable_memory_optimization: 1', conf_str)
             open(conf_file, 'w').write(conf_str)
+        return 0
+
     def hold(self):
         try:
             while True:
@@ -99,6 +104,10 @@ class PLSCServer():
         os.chdir(self.get_path())
         self.modify_conf(gpu_index)
+        if self.modify_conf(gpu_index) != 0:
+            print('Modify conf files failed')
+            return -1
+
         if self.with_gpu_flag == True:
             gpu_msg = '--gpuid=' + str(gpu_index) + ' '
             run_cmd = self.gpu_run_cmd + gpu_msg
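With this change, `run()` bails out when the configuration files cannot be rewritten, and together with the `with_model` rewrite below, failures are reported through a `-1` return value rather than an exception. A minimal sketch of how a caller might check these results (the model path is a placeholder):

```python
from plsc_serving.run import PLSCServer

fs = PLSCServer()
# with_model now requires an existing absolute path and returns -1 otherwise
if fs.with_model(model_path='/home/user/face_model') == -1:
    raise SystemExit('model path rejected')
# run() returns -1 if the conf files could not be rewritten
if fs.run(gpu_index=0, port=8010) == -1:
    raise SystemExit('server failed to start')
```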
@@ -150,16 +159,13 @@ class PLSCServer():
         print(conf_str)
         '''
-    def with_model(self, model_name=None, model_url=None):
-        '''
-        if model_url != None:
-            self.mode_url = model_url
-            self.use_other_model = True
-        '''
-        if model_name == None or type(model_name) != str:
-            print('Please set model name string')
+    def with_model(self, model_path=None):
+        if not (os.path.isabs(model_path) and os.path.exists(model_path)):
+            print('Please set correct absolute path for model file')
+            return -1
+
+        self.model_path_str = r'model_data_path: "' + model_path + r'"'
         os.chdir(self.get_path())
-        self.get_model(model_name)

     def get_path(self):
         py_path = os.path.dirname(plsc_serving.__file__)
...
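The overall effect of the commit is that `with_model` now records a `model_data_path: "..."` line for a local model directory instead of downloading a named model, and `modify_conf` presumably splices that line into `conf/model_toolkit.prototxt` before the server starts. A rough illustration of that substitution is below; the regex and the config field layout are assumptions, not shown in this diff, and the path is a placeholder.

```python
import re

# Hypothetical illustration: point the Paddle Serving engine config at the
# locally supplied model directory by rewriting its model_data_path field.
model_path_str = r'model_data_path: "' + '/home/user/face_model' + r'"'
with open('./conf/model_toolkit.prototxt', 'r') as f:
    conf_str = f.read()
conf_str = re.sub(r'model_data_path: ".*"', model_path_str, conf_str)
with open('./conf/model_toolkit.prototxt', 'w') as f:
    f.write(conf_str)
```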