Commit 91ba4654 authored by: W wangjiawei04

merge with develop

......@@ -99,7 +99,8 @@ PYBIND11_MODULE(serving_client, m) {
                  fetch_name,
                  predict_res_batch,
                  pid);
-          });
+          },
+          py::call_guard<py::gil_scoped_release>());
}
}  // namespace general_model
......
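The hunk above wraps the client's predict binding in `py::call_guard<py::gil_scoped_release>()`, so the Python GIL is released while the C++ prediction call runs. A minimal sketch of what that enables on the Python side, assuming the packaged `paddle_serving_client.Client` API, a server already listening on 127.0.0.1:9393, and illustrative config path, feed name, and values:

```python
# Illustrative only: with the GIL released inside predict(), these threads can
# overlap their RPC wait time instead of serializing on the interpreter lock.
import threading
from paddle_serving_client import Client

def worker(thread_id):
    client = Client()
    client.load_client_config("serving_client_conf.prototxt")  # hypothetical path
    client.connect(["127.0.0.1:9393"])
    fetch_map = client.predict(feed={"x": [0.0] * 13}, fetch=["price"])
    print(thread_id, fetch_map)

threads = [threading.Thread(target=worker, args=(i,)) for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```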
FROM registry.baidu.com/public/centos6u3-online:gcc482
MAINTAINER predictor@baidu.com
LABEL Description="paddle serving docker image"
USER root
RUN echo "Enjoy your paddle serving journey!"
ADD conf /home/work/paddle-serving/conf
ADD data /home/work/paddle-serving/data
ADD bin /home/work/paddle-serving/bin
RUN wget ftp://st01-rdqa-dev055-wanlijin01.epc.baidu.com/home/users/wanlijin01/workspace/baidu/paddle-serving/predictor/data.tar.gz -O /tmp/data.tar.gz \
&& tar -C /home/work/paddle-serving -xvzf /tmp/data.tar.gz \
&& rm /tmp/data.tar.gz \
&& cd /home/work/paddle-serving/ \
&& chmod a+x bin/pdserving \
&& chmod a+x bin/start.sh \
&& sed -i 's/\.\/conf/\/home\/work\/paddle-serving\/conf/g' conf/workflow.conf \
&& sed -i 's/\.\/conf/\/home\/work\/paddle-serving\/conf/g' conf/resource.conf \
&& sed -i 's/\.\/log/\/home\/work\/paddle-serving\/log/g' conf/log.conf \
&& sed -i 's/\.\/data/\/home\/work\/paddle-serving\/data/g' conf/model_toolkit.conf \
&& mkdir -p /home/work/paddle-serving/log
CMD sh /home/work/paddle-serving/bin/start.sh -c "trap : TERM INT; sleep infinity & wait"
FROM registry.baidu.com/paddlecloud/paddlecloud-runenv-centos6u3-bce:paddlecloud-fluid-gcc482-cuda8.0_cudnn5_bce
MAINTAINER predictor@baidu.com
LABEL Description="paddle serving docker image"
USER root
RUN echo "Enjoy your paddle serving journey!"
ADD conf /home/work/paddle-serving/conf
ADD data /home/work/paddle-serving/data
ADD bin /home/work/paddle-serving/bin
RUN wget ftp://st01-rdqa-dev055-wanlijin01.epc.baidu.com/home/users/wanlijin01/workspace/baidu/paddle-serving/predictor/data.tar.gz -O /tmp/data.tar.gz \
&& tar -C /home/work/paddle-serving -xvzf /tmp/data.tar.gz \
&& rm /tmp/data.tar.gz \
&& cd /home/work/paddle-serving/ \
&& chmod a+x bin/pdserving \
&& chmod a+x bin/start.sh \
&& sed -i 's/\.\/conf/\/home\/work\/paddle-serving\/conf/g' conf/workflow.conf \
&& sed -i 's/\.\/conf/\/home\/work\/paddle-serving\/conf/g' conf/resource.conf \
&& sed -i 's/\.\/log/\/home\/work\/paddle-serving\/log/g' conf/log.conf \
&& sed -i 's/\.\/data/\/home\/work\/paddle-serving\/data/g' conf/model_toolkit.conf \
&& mkdir -p /home/work/paddle-serving/log
CMD sh /home/work/paddle-serving/bin/start.sh -c "trap : TERM INT; sleep infinity & wait"
#!/bin/bash
function install_pdserving_lib() {
    ret=1
    local pdserving_lib_mode=$1
    case $pdserving_lib_mode in
        local)
            local pdserving_local_path=$2
            if [ ! -d $pdserving_local_path ]; then
                echo "[WARN] failed to find local path"
                return $ret
            fi
            lib_name=`basename $pdserving_local_path`
            if [ -d ${CITOOLS}/$lib_name ]; then
                rm -rf ${CITOOLS}/$lib_name
            fi
            cp -rf $pdserving_local_path ${CITOOLS}/
            source ${CITOOLS}/$lib_name/predictor_build_lib.sh
            ;;
        ftp)
            local wgetOptions="--tries=3 --retry-connrefused -r -l0 -nv --limit-rate=50m -nH --cut-dirs=5"
            pdserving_lib_ftp_path="ftp://tc-orp-app2.tc.baidu.com:/home/heqing/scmbak/common_lib/pdserving_cts/pdserving_lib"
            lib_name=`basename $pdserving_lib_ftp_path`
            if [ -d ${CITOOLS}/$lib_name ]; then
                rm -rf ${CITOOLS}/$lib_name
            fi
            echo "wget cmd is: $wgetOptions $pdserving_lib_ftp_path"
            echo "lib_name is: ${lib_name}"
            wget $wgetOptions $pdserving_lib_ftp_path
            mv ${lib_name} ${CITOOLS}/
            source ${CITOOLS}/${lib_name}/predictor_build_lib.sh
            ;;
        *)
            ret=0
            echo "todo"
            ;;
    esac
    return $ret
}
CUR_PATH=$(pwd)
WORK_PATH=$(pwd)
WORK_ROOT=${WORK_PATH%%/baidu/*}
# check out citools
CITOOLS="${WORK_ROOT}/baidu/fengchao-qa/citools"
if [ -d ${CITOOLS} ]; then
    rm -rf ${CITOOLS}
fi
git clone --depth 1 ssh://git@icode.baidu.com:8235/baidu/fengchao-qa/citools $CITOOLS >/dev/null
[[ $? != 0 ]] && exit 1
source $CITOOLS/lib/localbuild_lib.sh
# the path may change after sourcing, so reassign it
CITOOLS="${WORK_ROOT}/baidu/fengchao-qa/citools"
# install_pdserving_lib
pdserving_lib_mode="ftp"
install_pdserving_lib ${pdserving_lib_mode} # two modes: for "local", also pass the path to pdserving_lib on this machine
#source ${CITOOLS}/pdserving_lib/predictor_build_lib.sh
COVMODULEID=8652
TYPE=framework
# run this module's build initialization
predictor_build_init
WORKROOT=$WORK_ROOT
# run the build command
predictor_build_do $@
exit 0
......@@ -260,6 +260,7 @@ class Op {
```
### 5.4 Interfaces related to framework
Service
......
......@@ -2,7 +2,7 @@
(Simplified Chinese | [English](./SAVE.md))
-- At present, the Paddle service provides a save_model interface for users; the interface is similar to Paddle's `save_inference_model`.
+- Currently, Paddle Serving provides a save_model interface for users; the interface is similar to Paddle's `save_inference_model`.
``` python
import paddle_serving_client.io as serving_io
......
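For context, a call to that interface typically looks like the sketch below. The tiny fluid network, directory names, and variable names are all hypothetical stand-ins, not the example from the doc itself:

```python
# Assumption-laden sketch of saving a trained network for serving.
import paddle.fluid as fluid
import paddle_serving_client.io as serving_io

x = fluid.data(name="x", shape=[None, 13], dtype="float32")
price = fluid.layers.fc(input=x, size=1)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

# writes the server-side model directory and the client-side config directory
serving_io.save_model("uci_housing_model", "uci_housing_client",
                      {"x": x}, {"price": price}, fluid.default_main_program())
```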
# Launching an HTTP Prediction Service with uWSGI
In the provided fit_a_line example, you will see the following output after starting the HTTP prediction service:
```shell
web service address:
http://10.127.3.150:9393/uci/prediction
* Serving Flask app "serve" (lazy loading)
* Environment: production
WARNING: This is a development server. Do not use it in a production deployment.
Use a production WSGI server instead.
* Debug mode: off
* Running on http://0.0.0.0:9393/ (Press CTRL+C to quit)
```
This message warns that the HTTP service was started in development mode and cannot be used for production deployment. The server environment that Flask launches is not stable enough and cannot withstand a large volume of concurrent requests, so an actual deployment needs to pair it with a WSGI (Web Server Gateway Interface) server.
Below we show how to use the [uWSGI](https://github.com/unbit/uwsgi) module to deploy the HTTP prediction service for production.
Write the HTTP service script:
```python
# uwsgi_service.py
from paddle_serving_server.web_service import WebService
from flask import Flask, request

# set up the prediction service
uci_service = WebService(name="uci")
uci_service.load_model_config("./uci_housing_model")
uci_service.prepare_server(workdir="./workdir", port=int(9500), device="cpu")
uci_service.run_server()

# set up the Flask app
app_instance = Flask(__name__)

@app_instance.before_first_request
def init():
    global uci_service
    uci_service._launch_web_service()

service_name = "/" + uci_service.name + "/prediction"

@app_instance.route(service_name, methods=["POST"])
def run():
    return uci_service.get_prediction(request)

# app_instance.run() starts the service directly, for local debugging
if __name__ == "__main__":
    app_instance.run()
```
Start the HTTP service with uwsgi:
```bash
uwsgi --http :9000 --wsgi-file uwsgi_service.py --callable app_instance --processes 4
```
The --processes flag sets the number of worker processes for the service. Note that the Serving HTTP service does not currently support multi-threaded operation.
For more information about uWSGI, see the [uWSGI documentation](https://uwsgi-docs.readthedocs.io/en/latest/).
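As a quick sanity check you can POST a JSON request against the uWSGI-fronted service. A sketch, assuming the fit_a_line model above, the request format of this WebService version, and illustrative feed values:

```python
# Illustrative client for the service started above on port 9000.
import requests

payload = {"x": [0.0] * 13, "fetch": ["price"]}  # 13 features for uci_housing
resp = requests.post("http://127.0.0.1:9000/uci/prediction", json=payload)
print(resp.json())
```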
......@@ -51,6 +51,5 @@ for ei in range(1000):
for i in range(1, 27):
    feed_dict["sparse_{}".format(i - 1)] = data[0][i]
fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
-#print(fetch_map)
end = time.time()
print(end - start)
......@@ -40,7 +40,7 @@ for ei in range(10000):
for i in range(1, 27):
    feed_dict["embedding_{}.tmp_0".format(i - 1)] = data[0][i]
fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
-prob_list.append(fetch_map['prob'][1])
+prob_list.append(fetch_map['prob'][0][1])
label_list.append(data[0][-1][0])
print(auc(label_list, prob_list))
......
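The `fetch_map['prob'][0][1]` change reflects that fetched results now carry a leading batch dimension; the same indexing fix appears in the imdb snippet later in this commit. A toy illustration with made-up values:

```python
# Hypothetical fetch result: shape (batch_size, 2) for binary classification.
import numpy as np

fetch_map = {"prob": np.array([[0.83, 0.17]])}  # one sample in the batch
positive_prob = fetch_map["prob"][0][1]  # sample 0, probability of class 1
print(positive_prob)  # 0.17
```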
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from paddle_serving_client import Client
import numpy as np
import sys
client = Client()
client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9393"])
import paddle
test_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.test(), buf_size=500),
    batch_size=1)

for data in test_reader():
    fetch_map = client.predict(
        feed={"x": np.array(data[0][0])}, fetch=["price"])
    print("{} {}".format(fetch_map["price"][0][0], data[0][1][0]))
......@@ -31,4 +31,4 @@ for line in sys.stdin:
feed = {"words": word_ids}
fetch = ["acc", "cost", "prediction"]
fetch_map = client.predict(feed=feed, fetch=fetch)
print("{} {}".format(fetch_map["prediction"][1], label[0]))
print("{} {}".format(fetch_map["prediction"][0][1], label[0]))
......@@ -235,6 +235,8 @@ class Client(object):
        int_feed_names.append(key)
        if isinstance(feed_i[key], np.ndarray):
            int_shape.append(list(feed_i[key].shape))
+        else:
+            int_shape.append(self.feed_shapes_[key])
        if isinstance(feed_i[key], np.ndarray):
            int_slot.append(np.reshape(feed_i[key], (-1)).tolist())
        else:
......@@ -244,6 +246,8 @@ class Client(object):
        float_feed_names.append(key)
        if isinstance(feed_i[key], np.ndarray):
            float_shape.append(list(feed_i[key].shape))
+        else:
+            float_shape.append(self.feed_shapes_[key])
        if isinstance(feed_i[key], np.ndarray):
            float_slot.append(
                np.reshape(feed_i[key], (-1)).tolist())
......
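The added `else` branches fall back to the shape recorded in the client config when a feed value is a plain Python list rather than a numpy array. A standalone sketch of that rule, paraphrased from the diff (`feed_shapes_` here is a hypothetical stand-in for the attribute the client loads from its config):

```python
# Sketch of the shape-selection rule the diff introduces.
import numpy as np

feed_shapes_ = {"x": [13]}  # hypothetical: shape declared in the client config

def resolve_shape(key, value):
    # ndarray feeds carry their own shape; list feeds use the configured one
    if isinstance(value, np.ndarray):
        return list(value.shape)
    return feed_shapes_[key]

print(resolve_shape("x", np.zeros((1, 13))))  # [1, 13]
print(resolve_shape("x", [0.0] * 13))         # [13]
```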
......@@ -67,11 +67,15 @@ class WebService(object):
                feed_batch=feed, fetch=fetch)
            fetch_map_batch = self.postprocess(
                feed=request.json, fetch=fetch, fetch_map=fetch_map_batch)
+            for key in fetch_map_batch:
+                fetch_map_batch[key] = fetch_map_batch[key].tolist()
            result = {"result": fetch_map_batch}
        elif isinstance(feed, dict):
            if "fetch" in feed:
                del feed["fetch"]
            fetch_map = self.client_service.predict(feed=feed, fetch=fetch)
+            for key in fetch_map:
+                fetch_map[key] = fetch_map[key][0].tolist()
            result = self.postprocess(
                feed=request.json, fetch=fetch, fetch_map=fetch_map)
    except ValueError:
......
......@@ -107,6 +107,8 @@ class WebService(object):
        fetch_map_batch = self.client.predict(feed=feed, fetch=fetch)
        fetch_map_batch = self.postprocess(
            feed=request.json, fetch=fetch, fetch_map=fetch_map_batch)
+        for key in fetch_map_batch:
+            fetch_map_batch[key] = fetch_map_batch[key].tolist()
        result = {"result": fetch_map_batch}
        return result
......
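The `.tolist()` loops exist because the fetch results are numpy arrays, which the standard JSON encoder behind Flask responses rejects. A minimal demonstration (the fetch name and value are made up):

```python
import json
import numpy as np

fetch_map = {"price": np.array([[14.2]])}  # hypothetical fetch result
try:
    json.dumps({"result": fetch_map})
except TypeError as err:
    print("not JSON serializable:", err)

fetch_map = {key: fetch_map[key].tolist() for key in fetch_map}
print(json.dumps({"result": fetch_map}))  # {"result": {"price": [[14.2]]}}
```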
protobuf>=3.1.0
six
paddlepaddle-gpu
-numpy
+numpy>=1.12, <=1.16.4 ; python_version<"3.5"
......@@ -53,7 +53,7 @@ if '${PACK}' == 'ON':
REQUIRED_PACKAGES = [
-    'six >= 1.10.0', 'protobuf >= 3.1.0', 'numpy'
+    'six >= 1.10.0', 'protobuf >= 3.1.0', 'numpy >= 1.12'
]
if not find_package("paddlepaddle") and not find_package("paddlepaddle-gpu"):
......
......@@ -18,6 +18,7 @@ function init() {
    export PYTHONROOT=/usr
    cd Serving
    export SERVING_WORKDIR=$PWD
+    $PYTHONROOT/bin/python -m pip install -r python/requirements.txt
}
function check_cmd() {
......