Commit 55862ae0 authored by MRXLT

fix conflict

......@@ -832,6 +832,7 @@ function(PROTOBUF_GENERATE_SERVING_CPP FOR_SERVING_SIDE SRCS HDRS )
list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc")
list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h")
set(PDCODEGEN "${CMAKE_BINARY_DIR}/core/pdcodegen/pdcodegen")
if (${FOR_SERVING_SIDE})
add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc"
......@@ -842,7 +843,7 @@ function(PROTOBUF_GENERATE_SERVING_CPP FOR_SERVING_SIDE SRCS HDRS )
--plugin=protoc-gen-pdcodegen=${CMAKE_BINARY_DIR}/core/pdcodegen/pdcodegen
--proto_path=${CMAKE_SOURCE_DIR}/core/predictor/proto
${_protobuf_include_path} ${ABS_FIL}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE} ${PDCODEGEN}
COMMENT "Running Paddle-serving C++ protocol buffer compiler on ${FIL}"
VERBATIM)
else()
......@@ -854,7 +855,7 @@ function(PROTOBUF_GENERATE_SERVING_CPP FOR_SERVING_SIDE SRCS HDRS )
--pdcodegen_out=${CMAKE_CURRENT_BINARY_DIR}
--plugin=protoc-gen-pdcodegen=${CMAKE_BINARY_DIR}/pdcodegen/pdcodegen
${_protobuf_include_path} ${ABS_FIL}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE} ${PDCODEGEN}
COMMENT "Running Paddle-serving C++ protocol buffer compiler on ${FIL}"
VERBATIM)
endif()
......
......@@ -45,7 +45,28 @@ class EndpointRouterBase {
class WeightedRandomRender : public EndpointRouterBase {
public:
static int register_self() {
INLINE_REGIST_OBJECT(WeightedRandomRender, EndpointRouterBase, -1);
// INLINE_REGIST_OBJECT(WeightedRandomRender, EndpointRouterBase, -1);
Factory<WeightedRandomRender, EndpointRouterBase>* factory =
new (std::nothrow) Factory<WeightedRandomRender, EndpointRouterBase>();
if (factory == NULL) {
RAW_LOG_ERROR(
"Failed regist factory: WeightedRandomRender->EndpointRouterBase in "
"macro!");
return -1;
}
// When two clients are created in the same process, two
// "WeightedRandomRender" factory objects are registered.
// But in fact, the two clients can use one factory object
// together.
if (FactoryPool<EndpointRouterBase>::instance().register_factory(
"WeightedRandomRender", factory) != 0) {
RAW_LOG_INFO(
"Factory has been registed: "
"WeightedRandomRender->EndpointRouterBase.");
}
return 0;
}
......
......@@ -37,6 +37,7 @@ function(PROTOBUF_GENERATE_SERVING_CPP FOR_SERVING_SIDE SRCS HDRS )
list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc")
list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h")
set(PDCODEGEN "${CMAKE_BINARY_DIR}/core/pdcodegen/pdcodegen")
if (${FOR_SERVING_SIDE})
add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc"
......@@ -47,7 +48,7 @@ function(PROTOBUF_GENERATE_SERVING_CPP FOR_SERVING_SIDE SRCS HDRS )
--plugin=protoc-gen-pdcodegen=${CMAKE_BINARY_DIR}/core/pdcodegen/pdcodegen
--proto_path=${CMAKE_SOURCE_DIR}/core/predictor/proto
${_protobuf_include_path} ${ABS_FIL}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE} ${PDCODEGEN}
COMMENT "Running Paddle-serving C++ protocol buffer compiler on ${FIL}"
VERBATIM)
else()
......@@ -59,7 +60,7 @@ function(PROTOBUF_GENERATE_SERVING_CPP FOR_SERVING_SIDE SRCS HDRS )
--pdcodegen_out=${CMAKE_CURRENT_BINARY_DIR}
--plugin=protoc-gen-pdcodegen=${CMAKE_BINARY_DIR}/core/pdcodegen/pdcodegen
${_protobuf_include_path} ${ABS_FIL}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE}
DEPENDS ${ABS_FIL} ${Protobuf_PROTOC_EXECUTABLE} ${PDCODEGEN}
COMMENT "Running Paddle-serving C++ protocol buffer compiler on ${FIL}"
VERBATIM)
endif()
......
......@@ -14,7 +14,7 @@ Deep neural nets often have some preprocessing steps on input data, and postproc
## How to define a Node
PaddleServing has some predefined Computation Node in the framework. A very commonly used Computation Graph is the simple reader-inference-response mode that can cover most of the single model inference scenarios. A example graph and the corresponding DAG defination code is as follows.
PaddleServing has some predefined Computation Nodes in the framework. A very commonly used Computation Graph is the simple reader-inference-response mode that can cover most of the single-model inference scenarios. An example graph and the corresponding DAG definition code are as follows.
<center>
<img src='simple_dag.png' width = "260" height = "370" align="middle"/>
</center>
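For reference, a server-side DAG of this reader-inference-response form is usually assembled with the framework's op sequence helpers. The following is a minimal sketch, assuming the `paddle_serving_server` package with its `OpMaker`/`OpSeqMaker`/`Server` helpers; the model directory, workdir, and port are placeholders:

```python
# Minimal sketch of a reader -> inference -> response DAG
# (assumes paddle_serving_server is installed; the model directory,
# workdir and port below are placeholders).
from paddle_serving_server import OpMaker, OpSeqMaker, Server

op_maker = OpMaker()
read_op = op_maker.create('general_reader')        # parses the client request
infer_op = op_maker.create('general_infer')        # runs the saved model
response_op = op_maker.create('general_response')  # packs the fetch results

op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(infer_op)
op_seq_maker.add_op(response_op)

server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.load_model_config("serving_server_model")
server.prepare_server(workdir="workdir", port=9292, device="cpu")
server.run_server()
```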
......
......@@ -5,9 +5,9 @@
Paddle Serving is Paddle's high-performance online inference service framework, which can flexibly support the deployment of most models. In this article, the IMDB review sentiment analysis task is used as an example to show the entire process from model training to the deployment of an inference service in 9 steps.
## Step1: Prepare the Running Environment
Paddle Serving can be deployed on Linux environments such as Centos and Ubuntu. On other systems or in environments where you do not want to install the serving module, you can still access the server-side prediction service through the http service.
Paddle Serving can be deployed on Linux environments. Currently the server supports deployment on CentOS 7; [Docker deployment is recommended](RUN_IN_DOCKER.md). The RPC client supports deployment on CentOS 7 and Ubuntu 18. On other systems, or in environments where you do not want to install the serving module, you can still access the server-side prediction service through the HTTP service.
You can choose to install the cpu or gpu version of the server module according to the requirements and machine environment, and install the client module on the client machine. When you want to access the server with http
You can choose to install the cpu or gpu version of the server module according to your requirements and machine environment, and install the client module on the client machine. When you want to access the server over HTTP, there is no need to install the client module.
```shell
pip install paddle_serving_server #cpu version server side
......@@ -228,7 +228,7 @@ if __name__ == "__main__":
</details>
! [Training process](./ imdb_loss.png) As can be seen from the above figure, the loss of the model starts to converge after the 65th round. We save the model and configuration file after the 65th round of training is completed. The saved files are divided into imdb_cnn_client_conf and imdb_cnn_model folders. The former contains client-side configuration files, and the latter contains server-side configuration files and saved model files.
![Training process](./imdb_loss.png) As can be seen from the above figure, the loss of the model starts to converge after the 65th round. We save the model and configuration file after the 65th round of training is completed. The saved files are divided into imdb_cnn_client_conf and imdb_cnn_model folders. The former contains client-side configuration files, and the latter contains server-side configuration files and saved model files.
The parameter list of the save_model function is as follows:
| Parameter | Meaning |
......@@ -243,10 +243,10 @@ The parameter list of the save_model function is as follows:
The Paddle Serving framework supports two types of prediction service methods. One is to communicate through RPC and the other is to communicate through HTTP. The deployment and use of the RPC prediction service will be introduced first; the deployment and use of the HTTP prediction service will be introduced in Step 8.
`` `shell
```shell
python -m paddle_serving_server.serve --model imdb_cnn_model/ --port 9292 #cpu prediction service
python -m paddle_serving_server_gpu.serve --model imdb_cnn_model/ --port 9292 --gpu_ids 0 #gpu prediction service
`` `
```
The parameter --model in the command specifies the server-side model and configuration file directory previously saved, --port specifies the port of the prediction service. When deploying the gpu prediction service using the gpu version, you can use --gpu_ids to specify the gpu used.
......@@ -287,13 +287,13 @@ The script receives data from standard input and prints out the probability that
As an example, use the client implemented in the previous step to access the prediction service. The usage is as follows:
`` `shell
```shell
cat test_data/part-0 | python test_client.py imdb_lstm_client_conf/serving_client_conf.prototxt imdb.vocab
`` `
```
Using the 2084 samples in the test_data/part-0 file for testing, the model prediction accuracy is 88.19%.
** Note **: The effect of each model training may be slightly different, and the accuracy of predictions using the trained model will be close to the examples but may not be exactly the same.
**Note**: The effect of each model training may be slightly different, and the accuracy of predictions using the trained model will be close to the examples but may not be exactly the same.
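For reference, an RPC client such as the `test_client.py` used above typically follows the `paddle_serving_client` pattern below. This is a hedged sketch, not the exact example code: the endpoint address, the vocabulary handling, and the `to_ids` helper are illustrative assumptions.

```python
# Hedged sketch of an RPC client reading "text | label" lines from stdin
# (assumes paddle_serving_client is installed; endpoint and preprocessing
# are placeholders, not the exact test_client.py implementation).
import sys
from paddle_serving_client import Client


def to_ids(text, vocab):
    # hypothetical tokenizer: map each word to its vocabulary id, 0 if unseen
    return [vocab.get(w, 0) for w in text.split()]


client = Client()
client.load_client_config(sys.argv[1])  # e.g. imdb_lstm_client_conf/serving_client_conf.prototxt
client.connect(["127.0.0.1:9292"])

vocab = {w.strip(): i for i, w in enumerate(open(sys.argv[2]))}  # e.g. imdb.vocab
for line in sys.stdin:
    words, label = line.rstrip("\n").split(" | ")
    fetch_map = client.predict(feed={"words": to_ids(words, vocab)},
                               fetch=["prediction"])
    print(fetch_map["prediction"], "label:", label)
```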
## Step8: Deploy HTTP Prediction Service
......@@ -349,13 +349,13 @@ In the above command, the first parameter is the saved server-side model and con
## Step9: Call the prediction service with plaintext data
After starting the HTTP prediction service, you can make prediction with a single command:
`` `
```
curl -H "Content-Type: application/json" -X POST -d '{"words": "i am very sad | 0", "fetch": ["prediction"]}' http://127.0.0.1:9292/imdb/prediction
`` `
```
When the inference process is normal, the prediction probability is returned, as shown below.
`` `
```
{"prediction": [0.5592559576034546,0.44074398279190063]}
`` `
```
** Note **: The effect of each model training may be slightly different, and the inferred probability value using the trained model may not be consistent with the example.
**Note**: The effect of each model training may be slightly different, and the inferred probability value using the trained model may not be consistent with the example.
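For reference, the same HTTP call can also be issued from Python. A small sketch using the third-party `requests` package (an assumption here, not something Paddle Serving ships) is:

```python
# Hedged sketch: send the same JSON payload as the curl command above.
import requests

payload = {"words": "i am very sad | 0", "fetch": ["prediction"]}
resp = requests.post("http://127.0.0.1:9292/imdb/prediction", json=payload)
print(resp.json())  # e.g. {"prediction": [...]} when the service is healthy
```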
......@@ -6,9 +6,9 @@ Paddle Serving is Paddle's high-performance online prediction service framework, which can flexibly support
## Step1: Prepare the Environment
Paddle Serving can be deployed on Linux environments such as CentOS and Ubuntu. On other systems, or in environments where you do not want to install the serving module, you can still access the server-side prediction service through the HTTP service.
Paddle Serving can be deployed on Linux environments. Currently the server side supports deployment on CentOS 7, and [Docker deployment](RUN_IN_DOCKER_CN.md) is recommended. The RPC client can be deployed on CentOS 7 and Ubuntu 18. On other systems, or in environments where you do not want to install the serving module, you can still access the server-side prediction service through the HTTP service.
You can choose to install the cpu or gpu version of the server module according to your requirements and machine environment, and install the client module on the client machine. When you want to access the server over http
You can choose to install the cpu or gpu version of the server module according to your requirements and machine environment, and install the client module on the client machine. When accessing the server via HTTP requests, there is no need to install the client module on the client machine.
```shell
pip install paddle_serving_server #cpu version server side
......
## CTR Prediction Service
## CTR Prediction Service
### Get Sample Data
([简体中文](./README_CN.md)|English)
### download criteo dataset
```
sh get_data.sh
```
### Save the Model and Configuration Files
### download inference model
```
python local_train.py
wget https://paddle-serving.bj.bcebos.com/criteo_ctr_example/criteo_ctr_demo_model.tar.gz
tar xf criteo_ctr_demo_model.tar.gz
mv models/ctr_client_conf .
mv models/ctr_serving_model .
```
After running the script, the serving_server_model and serving_client_config folders will be generated in the current directory.
The serving_server_model and serving_client_config directories will appear in the current directory.
### Start the RPC Prediction Service
### Start RPC Inference Service
```
python -m paddle_serving_server.serve --model ctr_serving_model/ --port 9292 #start the CPU prediction service
python -m paddle_serving_server_gpu.serve --model ctr_serving_model/ --port 9292 --gpu_ids 0 #start the prediction service on GPU 0
python -m paddle_serving_server.serve --model ctr_serving_model/ --port 9292 #CPU RPC Service
python -m paddle_serving_server_gpu.serve --model ctr_serving_model/ --port 9292 --gpu_ids 0 #RPC Service on GPU 0
```
### Run Prediction
### RPC Infer
```
python test_client.py ctr_client_conf/serving_client_conf.prototxt raw_data/
```
After the prediction is finished, the time consumed by the prediction process will be printed.
The prediction latency will be displayed at the end.
## CTR Prediction Service
(简体中文|[English](./README.md))
### Get Sample Data
```
sh get_data.sh
```
### Download the Model
```
wget https://paddle-serving.bj.bcebos.com/criteo_ctr_example/criteo_ctr_demo_model.tar.gz
tar xf criteo_ctr_demo_model.tar.gz
mv models/ctr_client_conf .
mv models/ctr_serving_model .
```
The serving_server_model and serving_client_config folders will appear in the current directory.
### Start the RPC Prediction Service
```
python -m paddle_serving_server.serve --model ctr_serving_model/ --port 9292 #start the CPU prediction service
python -m paddle_serving_server_gpu.serve --model ctr_serving_model/ --port 9292 --gpu_ids 0 #start the prediction service on GPU 0
```
### Run Prediction
```
python test_client.py ctr_client_conf/serving_client_conf.prototxt raw_data/
```
After the prediction is finished, the time consumed by the prediction process will be printed.
......@@ -2,21 +2,35 @@
([简体中文](./README_CN.md)|English)
### Compile Source Code
In the root directory of this git project, run:
```
mkdir build_server
cd build_server
cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib64/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON ..
make -j10
make install -j10
```
### Get Sample Dataset
Go to the directory `python/examples/criteo_ctr_with_cube`:
```
sh get_data.sh
```
### Train and Save Model
### Download Model and Sparse Parameter Sequence Files
```
python local_train.py
wget https://paddle-serving.bj.bcebos.com/unittest/ctr_cube_unittest.tar.gz
tar xf ctr_cube_unittest.tar.gz
mv models/ctr_client_conf ./
mv models/ctr_serving_model_kv ./
mv models/data ./cube/
```
the trained model will be in ./ctr_server_model and ./ctr_client_config, and ctr_server_model_kv, ctr_client_conf_kv.
The model will be in ./ctr_server_model_kv and ./ctr_client_config.
### Start Sparse Parameter Indexing Service
```
cp ../../../build_server/core/predictor/seq_generator seq_generator
cp ../../../build_server/output/bin/cube* ./cube/
sh cube_prepare.sh &
```
......
## CTR Prediction Service with Sparse Parameter Indexing Service
(简体中文|[English](./README.md))
### Compile the Source Code
In the root directory of this project, run:
```
mkdir build_server
cd build_server
cmake -DPYTHON_INCLUDE_DIR=$PYTHONROOT/include/python2.7/ -DPYTHON_LIBRARIES=$PYTHONROOT/lib64/libpython2.7.so -DPYTHON_EXECUTABLE=$PYTHONROOT/bin/python -DSERVER=ON ..
make -j10
make install -j10
```
### Get Sample Data
Go to the directory `python/examples/criteo_ctr_with_cube`:
```
sh get_data.sh
```
### Save the Model and Configuration Files
### Download the Model and Sparse Parameter Sequence Files
```
python local_train.py
wget https://paddle-serving.bj.bcebos.com/unittest/ctr_cube_unittest.tar.gz
tar xf ctr_cube_unittest.tar.gz
mv models/ctr_client_conf ./
mv models/ctr_serving_model_kv ./
mv models/data ./cube/
```
After running the script, the ctr_server_model and ctr_client_config folders, as well as ctr_server_model_kv and ctr_client_conf_kv, will be generated in the current directory.
After running the script, the ctr_server_model_kv and ctr_client_config folders will be present in the current directory.
### Start the Sparse Parameter Indexing Service
```
cp ../../../build_server/core/predictor/seq_generator seq_generator
cp ../../../build_server/output/bin/cube* ./cube/
sh cube_prepare.sh &
```
......
......@@ -156,7 +156,9 @@ class Client(object):
)
else:
if self.predictor_sdk_ is None:
self.add_variant('var1', endpoints, 100)
timestamp = time.time()
self.add_variant('default_tag_{}'.format(timestamp), endpoints,
100)
else:
print(
"parameter endpoints({}) will not take effect, because you use the add_variant function.".
......
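For context, the change above replaces the fixed variant tag `var1` with a timestamp-based tag, so two clients created in the same process no longer register the same tag. A hedged usage sketch of the two ways to supply endpoints follows; the config path and endpoint address are placeholders.

```python
# Hedged sketch (assumes paddle_serving_client; paths and address are placeholders).
from paddle_serving_client import Client

# Option 1: pass endpoints to connect(); a default variant is registered
# internally, now tagged with the current timestamp instead of 'var1'.
client = Client()
client.load_client_config("serving_client_conf/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

# Option 2: register a named variant explicitly; endpoints passed to
# connect() are then ignored, as the warning in the snippet above explains.
client2 = Client()
client2.load_client_config("serving_client_conf/serving_client_conf.prototxt")
client2.add_variant("var_a", ["127.0.0.1:9292"], 100)
client2.connect(["127.0.0.1:9292"])  # prints the "will not take effect" warning
```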