Commit 4e8ff9f7 authored by barrierye

Merge branch 'develop' of https://github.com/PaddlePaddle/Serving into fix-timeline

......@@ -55,7 +55,7 @@ pip install paddle-serving-server-gpu # GPU
```
You may need to use a domestic mirror source (in China, you can use the Tsinghua mirror by adding `-i https://pypi.tuna.tsinghua.edu.cn/simple` to the pip command) to speed up the download.
The client package supports CentOS 7 and Ubuntu 18, or you can use the HTTP service without installing the client.
<h2 align="center">Quick Start Example</h2>
......@@ -256,6 +256,7 @@ curl -H "Content-Type:application/json" -X POST -d '{"url": "https://paddle-serv
### Developers
- [How to config Serving native operators on server side?](doc/SERVER_DAG.md)
- [How to develop a new Serving operator?](doc/NEW_OPERATOR.md)
- [How to develop a new Web Service?](doc/NEW_WEB_SERVICE.md)
- [Golang client](doc/IMDB_GO_CLIENT.md)
- [Compile from source code](doc/COMPILE.md)
......
......@@ -262,6 +262,7 @@ curl -H "Content-Type:application/json" -X POST -d '{"url": "https://paddle-serv
### Developer Tutorials
- [How to configure the server-side computation graph?](doc/SERVER_DAG_CN.md)
- [How to develop a new general operator?](doc/NEW_OPERATOR_CN.md)
- [How to develop a new Web Service?](doc/NEW_WEB_SERVICE_CN.md)
- [How to use the Go client with Paddle Serving?](doc/IMDB_GO_CLIENT_CN.md)
- [How to compile PaddleServing?](doc/COMPILE_CN.md)
......
......@@ -119,7 +119,7 @@ int PredictorClient::create_predictor_by_desc(const std::string &sdk_desc) {
LOG(ERROR) << "Predictor Creation Failed";
return -1;
}
_api.thrd_initialize();
// _api.thrd_initialize();
return 0;
}
......@@ -130,7 +130,7 @@ int PredictorClient::create_predictor() {
LOG(ERROR) << "Predictor Creation Failed";
return -1;
}
_api.thrd_initialize();
// _api.thrd_initialize();
return 0;
}
......@@ -152,7 +152,7 @@ int PredictorClient::batch_predict(
int fetch_name_num = fetch_name.size();
_api.thrd_clear();
_api.thrd_initialize();
std::string variant_tag;
_predictor = _api.fetch_predictor("general_model", &variant_tag);
predict_res_batch.set_variant_tag(variant_tag);
......@@ -247,8 +247,9 @@ int PredictorClient::batch_predict(
} else {
client_infer_end = timeline.TimeStampUS();
postprocess_start = client_infer_end;
VLOG(2) << "get model output num";
uint32_t model_num = res.outputs_size();
VLOG(2) << "model num: " << model_num;
for (uint32_t m_idx = 0; m_idx < model_num; ++m_idx) {
VLOG(2) << "process model output index: " << m_idx;
auto output = res.outputs(m_idx);
......@@ -326,6 +327,8 @@ int PredictorClient::batch_predict(
fprintf(stderr, "%s\n", oss.str().c_str());
}
_api.thrd_clear();
return 0;
}
......
......@@ -78,7 +78,6 @@ PYBIND11_MODULE(serving_client, m) {
[](PredictorClient &self) { self.create_predictor(); })
.def("destroy_predictor",
[](PredictorClient &self) { self.destroy_predictor(); })
.def("batch_predict",
[](PredictorClient &self,
const std::vector<std::vector<std::vector<float>>>
......
......@@ -27,9 +27,9 @@ namespace predictor {
}
#endif
#ifdef WITH_GPU
#define USE_PTHREAD
#endif
// #ifdef WITH_GPU
// #define USE_PTHREAD
// #endif
#ifdef USE_PTHREAD
......
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "core/sdk-cpp/include/common.h"
namespace baidu {
namespace paddle_serving {
namespace sdk_cpp {
#ifndef CATCH_ANY_AND_RET
#define CATCH_ANY_AND_RET(errno) \
catch (...) { \
    LOG(ERROR) << "exception caught";   \
return errno; \
}
#endif
#define USE_PTHREAD
#ifdef USE_PTHREAD
#define THREAD_T pthread_t
#define THREAD_KEY_T pthread_key_t
#define THREAD_MUTEX_T pthread_mutex_t
#define THREAD_KEY_CREATE pthread_key_create
#define THREAD_SETSPECIFIC pthread_setspecific
#define THREAD_GETSPECIFIC pthread_getspecific
#define THREAD_CREATE pthread_create
#define THREAD_CANCEL pthread_cancel
#define THREAD_JOIN pthread_join
#define THREAD_KEY_DELETE pthread_key_delete
#define THREAD_MUTEX_INIT pthread_mutex_init
#define THREAD_MUTEX_LOCK pthread_mutex_lock
#define THREAD_MUTEX_UNLOCK pthread_mutex_unlock
#define THREAD_MUTEX_DESTROY pthread_mutex_destroy
#define THREAD_COND_T pthread_cond_t
#define THREAD_COND_INIT pthread_cond_init
#define THREAD_COND_SIGNAL pthread_cond_signal
#define THREAD_COND_WAIT pthread_cond_wait
#define THREAD_COND_DESTROY pthread_cond_destroy
#else
#define THREAD_T bthread_t
#define THREAD_KEY_T bthread_key_t
#define THREAD_MUTEX_T bthread_mutex_t
#define THREAD_KEY_CREATE bthread_key_create
#define THREAD_SETSPECIFIC bthread_setspecific
#define THREAD_GETSPECIFIC bthread_getspecific
#define THREAD_CREATE bthread_start_background
#define THREAD_CANCEL bthread_stop
#define THREAD_JOIN bthread_join
#define THREAD_KEY_DELETE bthread_key_delete
#define THREAD_MUTEX_INIT bthread_mutex_init
#define THREAD_MUTEX_LOCK bthread_mutex_lock
#define THREAD_MUTEX_UNLOCK bthread_mutex_unlock
#define THREAD_MUTEX_DESTROY bthread_mutex_destroy
#define THREAD_COND_T bthread_cond_t
#define THREAD_COND_INIT bthread_cond_init
#define THREAD_COND_SIGNAL bthread_cond_signal
#define THREAD_COND_WAIT bthread_cond_wait
#define THREAD_COND_DESTROY bthread_cond_destroy
#endif
} // namespace sdk_cpp
} // namespace paddle_serving
} // namespace baidu
......@@ -19,6 +19,7 @@
#include <vector>
#include "core/sdk-cpp/include/common.h"
#include "core/sdk-cpp/include/endpoint_config.h"
#include "core/sdk-cpp/include/macros.h"
#include "core/sdk-cpp/include/predictor.h"
#include "core/sdk-cpp/include/stub.h"
......@@ -245,7 +246,7 @@ class StubImpl : public Stub {
const brpc::ChannelOptions& options);
StubTLS* get_tls() {
return static_cast<StubTLS*>(bthread_getspecific(_bthread_key));
return static_cast<StubTLS*>(THREAD_GETSPECIFIC(_bthread_key));
}
private:
......@@ -262,7 +263,8 @@ class StubImpl : public Stub {
uint32_t _package_size;
// tls handlers
bthread_key_t _bthread_key;
// bthread_key_t _bthread_key;
THREAD_KEY_T _bthread_key;
// bvar variables
std::map<std::string, BvarWrapper*> _ltc_bvars;
......
......@@ -70,7 +70,7 @@ int StubImpl<T, C, R, I, O>::initialize(const VariantInfo& var,
_endpoint = ep;
if (bthread_key_create(&_bthread_key, NULL) != 0) {
if (THREAD_KEY_CREATE(&_bthread_key, NULL) != 0) {
LOG(FATAL) << "Failed create key for stub tls";
return -1;
}
......@@ -132,13 +132,13 @@ int StubImpl<T, C, R, I, O>::initialize(const VariantInfo& var,
template <typename T, typename C, typename R, typename I, typename O>
int StubImpl<T, C, R, I, O>::thrd_initialize() {
if (bthread_getspecific(_bthread_key) != NULL) {
if (THREAD_GETSPECIFIC(_bthread_key) != NULL) {
LOG(WARNING) << "Already thread initialized for stub";
return 0;
}
StubTLS* tls = new (std::nothrow) StubTLS();
if (!tls || bthread_setspecific(_bthread_key, tls) != 0) {
if (!tls || THREAD_SETSPECIFIC(_bthread_key, tls) != 0) {
LOG(FATAL) << "Failed binding tls data to bthread_key";
return -1;
}
......
......@@ -12,23 +12,20 @@ Paddle Serving支持基于Paddle进行训练的各种模型,并通过指定模
import paddlehub as hub
model_name = "bert_chinese_L-12_H-768_A-12"
module = hub.Module(model_name)
inputs, outputs, program = module.context(
trainable=True, max_seq_len=20)
feed_keys = ["input_ids", "position_ids", "segment_ids",
"input_mask", "pooled_output", "sequence_output"]
inputs, outputs, program = module.context(trainable=True, max_seq_len=20)
feed_keys = ["input_ids", "position_ids", "segment_ids", "input_mask", "pooled_output", "sequence_output"]
fetch_keys = ["pooled_output", "sequence_output"]
feed_dict = dict(zip(feed_keys, [inputs[x] for x in feed_keys]))
fetch_dict = dict(zip(fetch_keys, [outputs[x] for x in fetch_keys]))
import paddle_serving_client.io as serving_io
serving_io.save_model("bert_seq20_model", "bert_seq20_client",
feed_dict, fetch_dict, program)
serving_io.save_model("bert_seq20_model", "bert_seq20_client", feed_dict, fetch_dict, program)
```
#### Step 2: Start the service
``` shell
python -m paddle_serving_server_gpu.serve --model bert_seq20_model --thread 10 --port 9292 --gpu_ids 0
python -m paddle_serving_server_gpu.serve --model bert_seq20_model --port 9292 --gpu_ids 0
```
| Parameter | Description |
......@@ -53,7 +50,6 @@ pip install paddle_serving_app
The content of the client script bert_client.py is as follows:
``` python
import os
import sys
from paddle_serving_client import Client
from paddle_serving_app import ChineseBertReader
......
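The hunk above shows only the imports of bert_client.py; the rest of the script is elided. As a rough, hypothetical sketch of how such a client is typically driven (the reader options, endpoint, and fetch name are illustrative assumptions, not the exact file contents):

```python
# Hypothetical continuation of bert_client.py, for illustration only.
# Assumes the model saved in Step 1 and the server started in Step 2.
import sys
from paddle_serving_client import Client
from paddle_serving_app import ChineseBertReader

reader = ChineseBertReader({"max_seq_len": 20})  # assumed reader config
client = Client()
client.load_client_config("bert_seq20_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])               # port used in Step 2

for line in sys.stdin:
    feed_dict = reader.process(line)             # tokenize one sentence
    fetch_map = client.predict(feed=feed_dict, fetch=["pooled_output"])
    print(fetch_map["pooled_output"])
```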
......@@ -2,6 +2,8 @@
(简体中文|[English](./DESIGN.md))
Note: the content of this page is outdated. Please refer to the [design document](https://github.com/PaddlePaddle/Serving/blob/develop/doc/DESIGN_DOC_CN.md) instead.
## 1. Project Background
PaddlePaddle is Baidu's open-source machine learning framework and broadly supports customized development of deep learning models. Paddle Serving is the online inference component of Paddle; it connects seamlessly with Paddle model training and provides machine learning inference as a cloud service. This document describes the design of Paddle Serving bottom-up, from the model layer through the service and access layers.
......
......@@ -164,12 +164,26 @@ Distributed Sparse Parameter Indexing is commonly seen in advertising and recomm
<img src='cube_eng.png' width = "450" height = "230">
<br>
<p>
Why do we need distributed sparse parameter indexing in Paddle Serving? 1) In some recommendation scenarios, the number of features can reach hundreds of billions, so a single node cannot hold the parameters in memory. 2) Paddle Serving's distributed sparse parameter indexing couples with Paddle inference, so users get a low-latency inference engine with hundreds of billions of parameters without any extra work.
### 3.2 Model Management, online A/B test, Model Online Reloading
Paddle Serving's C++ engine supports model management, online A/B test and model online reloading. Currently, the Python API is not released yet; please wait for the next release.
### 3.2 Online A/B test
After sufficient offline evaluation of the model, an online A/B test is usually needed to decide whether to enable the service on a large scale. The following figure shows the basic structure of an A/B test with Paddle Serving. Once the client is configured accordingly, traffic is automatically distributed to different servers to carry out the A/B test. Please refer to [ABTEST in Paddle Serving](ABTEST_IN_PADDLE_SERVING.md) for specific examples.
<p align="center">
<br>
<img src='abtest.png' width = "345" height = "230">
<br>
<p>
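As a rough illustration of what the client-side configuration for such a test can look like, here is a minimal sketch. The `add_variant` call, the 50/50 weights, and the feed/fetch names are assumptions based on the ABTEST guide linked above, not something defined in this commit:

```python
# Hypothetical A/B client configuration, for illustration only.
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf.prototxt")
# Assumed API: register two server groups as variants with a 50/50 traffic split.
client.add_variant("model_A", ["127.0.0.1:9393"], 50)
client.add_variant("model_B", ["127.0.0.1:9494"], 50)
client.connect()

# need_variant_tag=True returns which variant served the request, matching the
# variant_tag plumbing in PredictorClient::batch_predict shown in this commit.
fetch_map, tag = client.predict(feed={"x": [0.1] * 13},   # placeholder input
                                fetch=["price"],          # placeholder fetch name
                                need_variant_tag=True)
print(tag, fetch_map)
```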
### 3.3 Model Online Reloading
To ensure service availability, the model needs to be hot-loaded without interrupting the service. Paddle Serving supports this feature and provides a tool that monitors newly produced models and updates the local model. Please refer to [Hot loading in Paddle Serving](HOT_LOADING_IN_SERVING.md) for specific examples.
### 3.4 Model Management
Paddle Serving's C++ engine supports model management. Currently, the Python API is not released yet; please wait for the next release.
## 4. User Types
Paddle Serving provides RPC and HTTP protocols for users. We recommend the HTTP service for small and medium traffic services without strict latency requirements, and the RPC protocol for high-traffic services that need low latency. Users of the built-in distributed sparse parameter indexing service do not need to care about the underlying communication details. The following figure shows several scenarios in which users may want to use Paddle Serving.
......
......@@ -159,14 +159,30 @@ Paddle Serving的核心执行引擎是一个有向无环图,图中的每个节
<img src='cube_eng.png' width = "450" height = "230">
<br>
<p>
Why use the distributed sparse parameter indexing service provided by Paddle Serving? 1) In some recommendation scenarios, the input feature scale of a model can reach hundreds of billions, and a single machine cannot hold a terabyte-scale model in memory, so distributed storage is required. 2) The distributed sparse parameter indexing service provided by Paddle Serving can issue concurrent requests to multiple nodes, so predictions can be served with low latency.
### 3.2 Model Management, Online A/B Test, Model Hot Loading
### 3.2 Online A/B Test
After sufficient offline evaluation of the model, an online A/B test is usually needed to decide whether to launch the service at scale. The figure below shows the basic structure of an A/B test with Paddle Serving. Once the client is configured accordingly, traffic is automatically distributed to different servers to carry out the A/B test. See [ABTEST in Paddle Serving](ABTEST_IN_PADDLE_SERVING_CN.md) for a concrete example.
<p align="center">
<br>
<img src='abtest.png' width = "345" height = "230">
<br>
<p>
### 3.3 Model Hot Loading
Paddle Serving's C++ engine supports model management, online A/B testing, model hot loading and related features; the configuration for these features is not yet fully exposed in the Python API, so please stay tuned.
To ensure service availability, the model needs to be hot-loaded without interrupting the service. Paddle Serving supports this feature and provides a tool that monitors newly produced models and updates the local model. See [Hot loading in Paddle Serving](HOT_LOADING_IN_SERVING_CN.md) for a concrete example.
### 3.4 Model Management
Paddle Serving's C++ engine supports model management; the configuration for this feature is not yet fully exposed in the Python API, so please stay tuned.
## 4. User Types
Paddle Serving provides two access protocols, RPC and HTTP. The HTTP protocol is intended for AI service developers with small or medium traffic and no strict latency requirements. The RPC protocol targets users with higher traffic and stricter latency requirements; in addition, the RPC client may itself live inside a larger service, a scenario well suited to Paddle Serving's RPC service. Users of the distributed sparse parameter indexing service do not need to care about the underlying details; such a call is essentially one RPC service calling another RPC service. The following figure shows several scenarios in which the current design of Paddle Serving may be used.
<p align="center">
......
# How to Convert Paddle Inference Model To Paddle Serving Format
([简体中文](./INFERENCE_TO_SERVING_CN.md)|English)
## Example
``` python
from paddle_serving_client.io import inference_model_to_serving
inference_model_dir = "your_inference_model"
serving_client_dir = "serving_client_dir"
serving_server_dir = "serving_server_dir"
feed_var_names, fetch_var_names = inference_model_to_serving(
inference_model_dir, serving_client_dir, serving_server_dir)
```
# How to Convert a Saved Paddle Inference Model to a Deployable Paddle Serving Model
([English](./INFERENCE_TO_SERVING.md)|简体中文)
## Example
``` python
from paddle_serving_client.io import inference_model_to_serving
inference_model_dir = "your_inference_model"
serving_client_dir = "serving_client_dir"
serving_server_dir = "serving_server_dir"
feed_var_names, fetch_var_names = inference_model_to_serving(
inference_model_dir, serving_client_dir, serving_server_dir)
```
# How to develop a new Web service?
([简体中文](NEW_WEB_SERVICE_CN.md)|English)
This document takes the image classification service based on the ImageNet dataset as an example to introduce how to develop a new web service. The complete code can be found [here](https://github.com/PaddlePaddle/Serving/blob/develop/python/examples/imagenet/image_classification_service.py).
## WebService base class
Paddle Serving implements the [WebService](https://github.com/PaddlePaddle/Serving/blob/develop/python/paddle_serving_server/web_service.py#L23) base class. You need to override its `preprocess` and `postprocess` methods. The default implementation is as follows:
```python
class WebService(object):
    def preprocess(self, feed={}, fetch=[]):
        return feed, fetch

    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        return fetch_map
```
### preprocess
The preprocess method has two input parameters, `feed` and `fetch`. For an HTTP request `request`:
- The value of `feed` is request data `request.json`
- The value of `fetch` is the fetch part `request.json["fetch"]` in the request data
The return values are the feed and fetch values used in the prediction.
### postprocess
The postprocess method has three input parameters, `feed`, `fetch` and `fetch_map`:
- The value of `feed` is request data `request.json`
- The value of `fetch` is the fetch part `request.json["fetch"]` in the request data
- The value of `fetch_map` is the model output value.
The return value will be wrapped as `{"result": fetch_map}` and used as the response of the HTTP request.
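To make the data flow concrete, a request to such a web service can be issued from Python as sketched below. The host, port, service name (`image`), feed key, and fetch name are illustrative assumptions that follow the ImageService example in the next section:

```python
# Illustrative HTTP request to a WebService endpoint; host, port and the
# service name ("image") are assumptions matching the ImageService example.
import base64
import json
import requests

with open("daisy.jpg", "rb") as f:
    image = base64.b64encode(f.read()).decode("utf-8")

# The feed fields plus the "fetch" list travel in one JSON body (request.json).
payload = {"image": image, "fetch": ["score"]}
resp = requests.post("http://127.0.0.1:9393/image/prediction",
                     data=json.dumps(payload),
                     headers={"Content-Type": "application/json"})

# The server wraps the postprocess output as {"result": fetch_map}.
print(resp.json()["result"])
```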
## Develop ImageService class
```python
class ImageService(WebService):
    def preprocess(self, feed={}, fetch=[]):
        reader = ImageReader()
        if "image" not in feed:
            raise ValueError("feed data error!")
        if isinstance(feed["image"], list):
            feed_batch = []
            for image in feed["image"]:
                sample = base64.b64decode(image)
                img = reader.process_image(sample)
                res_feed = {}
                res_feed["image"] = img.reshape(-1)
                feed_batch.append(res_feed)
            return feed_batch, fetch
        else:
            sample = base64.b64decode(feed["image"])
            img = reader.process_image(sample)
            res_feed = {}
            res_feed["image"] = img.reshape(-1)
            return res_feed, fetch
```
For the above `ImageService`, only the `preprocess` method is overridden; it decodes the Base64-encoded image data into the data format required for prediction.
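After defining the subclass, the service is launched the same way as any WebService. A minimal sketch is given below; the service name, model path, workdir, and port are placeholders, and the `run_server()`/`run_flask()` calls mirror the example scripts changed later in this commit:

```python
# Minimal launch sketch; model path, workdir and port are placeholders.
image_service = ImageService(name="image")
image_service.load_model_config("image_classification_model")
image_service.prepare_server(workdir="workdir", port=9393, device="cpu")
image_service.run_server()  # start the RPC backend
image_service.run_flask()   # start the HTTP frontend (added in this commit)
```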
# How to develop a new Web Service?
(简体中文|[English](NEW_WEB_SERVICE.md))
This document takes the ImageNet-based image classification service as an example to introduce how to develop a new Web Service. The complete code can be found [here](https://github.com/PaddlePaddle/Serving/blob/develop/python/examples/imagenet/image_classification_service.py).
## The WebService base class
Paddle Serving implements the [WebService](https://github.com/PaddlePaddle/Serving/blob/develop/python/paddle_serving_server/web_service.py#L23) base class. You need to override its `preprocess` and `postprocess` methods. The default implementation is as follows:
```python
class WebService(object):
    def preprocess(self, feed={}, fetch=[]):
        return feed, fetch

    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        return fetch_map
```
### The preprocess method
The preprocess method has two input parameters, `feed` and `fetch`. For an HTTP request `request`:
- The value of `feed` is the request data `request.json`
- The value of `fetch` is the fetch part of the request data, `request.json["fetch"]`
The return values are the feed and fetch values used in prediction.
### The postprocess method
The postprocess method has three input parameters, `feed`, `fetch` and `fetch_map`:
- The value of `feed` is the request data `request.json`
- The value of `fetch` is the fetch part of the request data, `request.json["fetch"]`
- The value of `fetch_map` is the model output value
The return value will be wrapped as `{"result": fetch_map}` and used as the response of the HTTP request.
## Developing the ImageService class
```python
class ImageService(WebService):
    def preprocess(self, feed={}, fetch=[]):
        reader = ImageReader()
        if "image" not in feed:
            raise ValueError("feed data error!")
        if isinstance(feed["image"], list):
            feed_batch = []
            for image in feed["image"]:
                sample = base64.b64decode(image)
                img = reader.process_image(sample)
                res_feed = {}
                res_feed["image"] = img.reshape(-1)
                feed_batch.append(res_feed)
            return feed_batch, fetch
        else:
            sample = base64.b64decode(feed["image"])
            img = reader.process_image(sample)
            res_feed = {}
            res_feed["image"] = img.reshape(-1)
            return res_feed, fetch
```
For the above `ImageService`, only the preprocess method is overridden; it converts the Base64-encoded image data into the data format required for model prediction.
doc/abtest.png changed (291.5 KB → 295.1 KB)
......@@ -36,3 +36,4 @@ bert_service.set_gpus(gpu_ids)
bert_service.prepare_server(
workdir="workdir", port=int(sys.argv[2]), device="gpu")
bert_service.run_server()
bert_service.run_flask()
# Faster RCNN model on Paddle Serving
([简体中文](./README_CN.md)|English)
### Get The Faster RCNN Model
```
wget https://paddle-serving.bj.bcebos.com/pddet_demo/faster_rcnn_model.tar.gz
wget https://paddle-serving.bj.bcebos.com/pddet_demo/infer_cfg.yml
```
If you want to have more detection models, please refer to [Paddle Detection Model Zoo](https://github.com/PaddlePaddle/PaddleDetection/blob/release/0.2/docs/MODEL_ZOO_cn.md)
### Start the service
```
tar xf faster_rcnn_model.tar.gz
mv faster_rcnn_model/pddet* .
GLOG_v=2 python -m paddle_serving_server_gpu.serve --model pddet_serving_model --port 9494 --gpu_id 0
```
### Perform prediction
```
python test_client.py pddet_client_conf/serving_client_conf.prototxt infer_cfg.yml 000000570688.jpg
```
## 3. Result analysis
<p align = "center">
    <br>
<img src = '000000570688.jpg'>
    <br>
<p>
This is the input picture
  
<p align = "center">
    <br>
<img src = '000000570688_bbox.jpg'>
    <br>
<p>
This is the picture after adding bbox. You can see that the client has done post-processing for the picture. In addition, the output/bbox.json also has the number and coordinate information of each box.
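As an illustrative sanity check (not part of the original document), the post-processing output can be inspected from Python; the exact layout of `output/bbox.json` is an assumption here:

```python
# Illustrative only: inspect the detection output written by the client
# post-processing step. The JSON layout is assumed, not documented above.
import json

with open("output/bbox.json") as f:
    boxes = json.load(f)

print("number of records:", len(boxes))
print("preview:", str(boxes)[:200])
```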
# Deploying the Faster RCNN Model with Paddle Serving
(简体中文|[English](./README.md))
## Get the Faster RCNN Model
```
wget https://paddle-serving.bj.bcebos.com/pddet_demo/faster_rcnn_model.tar.gz
wget https://paddle-serving.bj.bcebos.com/pddet_demo/infer_cfg.yml
```
For more detection models, please refer to the [Paddle Detection Model Zoo](https://github.com/PaddlePaddle/PaddleDetection/blob/release/0.2/docs/MODEL_ZOO_cn.md)
### Start the service
```
tar xf faster_rcnn_model.tar.gz
mv faster_rcnn_model/pddet* .
GLOG_v=2 python -m paddle_serving_server_gpu.serve --model pddet_serving_model --port 9494 --gpu_id 0
```
### Perform prediction
```
python test_client.py pddet_client_conf/serving_client_conf.prototxt infer_cfg.yml 000000570688.jpg
```
## 3. Result analysis
<p align="center">
<br>
<img src='000000570688.jpg' >
<br>
<p>
This is the input picture.
<p align="center">
<br>
<img src='000000570688_bbox.jpg' >
<br>
<p>
This is the picture after the bounding boxes have been added. You can see that the client has done the post-processing for the picture; in addition, `output/bbox.json` contains the number and coordinate information of each box.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_client import Client
import sys
import os
import time
from paddle_serving_app.reader.pddet import Detection
import numpy as np
py_version = sys.version_info[0]
feed_var_names = ['image', 'im_shape', 'im_info']
fetch_var_names = ['multiclass_nms']
pddet = Detection(config_path=sys.argv[2], output_dir="./output")
feed_dict = pddet.preprocess(feed_var_names, sys.argv[3])
client = Client()
client.load_client_config(sys.argv[1])
client.connect(['127.0.0.1:9494'])
fetch_map = client.predict(feed=feed_dict, fetch=fetch_var_names)
outs = fetch_map.values()
pddet.postprocess(fetch_map, fetch_var_names)
......@@ -31,14 +31,14 @@ class ImageService(WebService):
sample = base64.b64decode(image)
img = reader.process_image(sample)
res_feed = {}
res_feed["image"] = img.reshape(-1)
res_feed["image"] = img
feed_batch.append(res_feed)
return feed_batch, fetch
else:
sample = base64.b64decode(feed["image"])
img = reader.process_image(sample)
res_feed = {}
res_feed["image"] = img.reshape(-1)
res_feed["image"] = img
return res_feed, fetch
......@@ -47,3 +47,4 @@ image_service.load_model_config(sys.argv[1])
image_service.prepare_server(
workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
image_service.run_server()
image_service.run_flask()
......@@ -12,12 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_server_gpu.web_service import WebService
import sys
import cv2
import base64
import numpy as np
from image_reader import ImageReader
from paddle_serving_server_gpu.web_service import WebService
class ImageService(WebService):
......@@ -32,14 +32,14 @@ class ImageService(WebService):
sample = base64.b64decode(image)
img = reader.process_image(sample)
res_feed = {}
res_feed["image"] = img.reshape(-1)
res_feed["image"] = img
feed_batch.append(res_feed)
return feed_batch, fetch
else:
sample = base64.b64decode(feed["image"])
img = reader.process_image(sample)
res_feed = {}
res_feed["image"] = img.reshape(-1)
res_feed["image"] = img
return res_feed, fetch
......@@ -49,3 +49,4 @@ image_service.set_gpus("0,1")
image_service.prepare_server(
workdir=sys.argv[2], port=int(sys.argv[3]), device="gpu")
image_service.run_server()
image_service.run_flask()
......@@ -31,7 +31,7 @@ def predict(image_path, server):
r = requests.post(
server, data=req, headers={"Content-Type": "application/json"})
try:
print(r.json()["score"][0])
print(r.json()["result"]["score"])
except ValueError:
print(r.text)
return r
......
......@@ -26,7 +26,7 @@ start = time.time()
for i in range(1000):
with open("./data/n01440764_10026.JPEG", "rb") as f:
img = f.read()
img = reader.process_image(img).reshape(-1)
img = reader.process_image(img)
fetch_map = client.predict(feed={"image": img}, fetch=["score"])
end = time.time()
print(end - start)
......
......@@ -39,3 +39,4 @@ imdb_service.prepare_server(
workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
imdb_service.prepare_dict({"dict_file_path": sys.argv[4]})
imdb_service.run_server()
imdb_service.run_flask()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
from .image_tool import Resize, Detection
(This diff is collapsed.)
......@@ -26,6 +26,34 @@ int_type = 0
float_type = 1
class _NOPProfiler(object):
def record(self, name):
pass
def print_profile(self):
pass
class _TimeProfiler(object):
def __init__(self):
self.pid = os.getpid()
self.print_head = 'PROFILE\tpid:{}\t'.format(self.pid)
self.time_record = [self.print_head]
def record(self, name):
self.time_record.append('{}:{} '.format(
name, int(round(time.time() * 1000000))))
def print_profile(self):
self.time_record.append('\n')
sys.stderr.write(''.join(self.time_record))
self.time_record = [self.print_head]
_is_profile = int(os.environ.get('FLAGS_profile_client', 0))
_Profiler = _TimeProfiler if _is_profile else _NOPProfiler
class SDKConfig(object):
def __init__(self):
self.sdk_desc = sdk.SDKConf()
......@@ -89,6 +117,7 @@ class Client(object):
self.predictor_sdk_ = None
self.producers = []
self.consumer = None
self.profile_ = _Profiler()
def rpath(self):
lib_path = os.path.dirname(paddle_serving_client.__file__)
......@@ -184,6 +213,8 @@ class Client(object):
key))
def predict(self, feed=None, fetch=None, need_variant_tag=False):
self.profile_.record('py_prepro_0')
if feed is None or fetch is None:
raise ValueError("You should specify feed and fetch for prediction")
......@@ -256,11 +287,17 @@ class Client(object):
int_slot_batch.append(int_slot)
float_slot_batch.append(float_slot)
self.profile_.record('py_prepro_1')
self.profile_.record('py_client_infer_0')
result_batch = self.result_handle_
res = self.client_handle_.batch_predict(
float_slot_batch, float_feed_names, float_shape, int_slot_batch,
int_feed_names, int_shape, fetch_names, result_batch, self.pid)
self.profile_.record('py_client_infer_1')
self.profile_.record('py_postpro_0')
if res == -1:
return None
......@@ -273,7 +310,7 @@ class Client(object):
if self.fetch_names_to_type_[name] == int_type:
result_map[name] = result_batch.get_int64_by_name(mi, name)
shape = result_batch.get_shape(mi, name)
result_map[name] = np.array(result_map[name])
result_map[name] = np.array(result_map[name], dtype='int64')
result_map[name].shape = shape
if name in self.lod_tensor_set:
result_map["{}.lod".format(
......@@ -281,7 +318,8 @@ class Client(object):
elif self.fetch_names_to_type_[name] == float_type:
result_map[name] = result_batch.get_float_by_name(mi, name)
shape = result_batch.get_shape(mi, name)
result_map[name] = np.array(result_map[name])
result_map[name] = np.array(
result_map[name], dtype='float32')
result_map[name].shape = shape
if name in self.lod_tensor_set:
result_map["{}.lod".format(
......@@ -299,6 +337,9 @@ class Client(object):
for mi, engine_name in enumerate(model_engine_names)
}
self.profile_.record('py_postpro_1')
self.profile_.print_profile()
# When using the A/B test, the tag of variant needs to be returned
return ret if not need_variant_tag else [
ret, self.result_handle_.variant_tag()
......
......@@ -20,6 +20,7 @@ from paddle.fluid.framework import default_main_program
from paddle.fluid.framework import Program
from paddle.fluid import CPUPlace
from paddle.fluid.io import save_inference_model
import paddle.fluid as fluid
from ..proto import general_model_config_pb2 as model_conf
import os
......@@ -100,3 +101,20 @@ def save_model(server_model_folder,
with open("{}/serving_server_conf.stream.prototxt".format(
server_model_folder), "wb") as fout:
fout.write(config.SerializeToString())
def inference_model_to_serving(infer_model, serving_client, serving_server):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
inference_program, feed_target_names, fetch_targets = \
fluid.io.load_inference_model(dirname=infer_model, executor=exe)
feed_dict = {
x: inference_program.global_block().var(x)
for x in feed_target_names
}
fetch_dict = {x.name: x for x in fetch_targets}
save_model(serving_client, serving_server, feed_dict, fetch_dict,
inference_program)
feed_names = feed_dict.keys()
fetch_names = fetch_dict.keys()
return feed_names, fetch_names
......@@ -351,6 +351,7 @@ class Server(object):
self._prepare_resource(workdir)
self._prepare_engine(self.model_config_paths, device)
self._prepare_infer_service(port)
self.port = port
self.workdir = workdir
infer_service_fn = "{}/{}".format(workdir, self.infer_service_fn)
......
......@@ -18,6 +18,8 @@ from flask import Flask, request, abort
from multiprocessing import Pool, Process
from paddle_serving_server import OpMaker, OpSeqMaker, Server
from paddle_serving_client import Client
from contextlib import closing
import socket
class WebService(object):
......@@ -41,19 +43,34 @@ class WebService(object):
server.set_num_threads(16)
server.load_model_config(self.model_config)
server.prepare_server(
workdir=self.workdir, port=self.port + 1, device=self.device)
workdir=self.workdir, port=self.port_list[0], device=self.device)
server.run_server()
def port_is_available(self, port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('0.0.0.0', port))
if result != 0:
return True
else:
return False
def prepare_server(self, workdir="", port=9393, device="cpu"):
self.workdir = workdir
self.port = port
self.device = device
default_port = 12000
self.port_list = []
for i in range(1000):
if self.port_is_available(default_port + i):
self.port_list.append(default_port + i)
break
def _launch_web_service(self):
self.client_service = Client()
self.client_service.load_client_config(
"{}/serving_server_conf.prototxt".format(self.model_config))
self.client_service.connect(["0.0.0.0:{}".format(self.port + 1)])
self.client = Client()
self.client.load_client_config("{}/serving_server_conf.prototxt".format(
self.model_config))
self.client.connect(["0.0.0.0:{}".format(self.port_list[0])])
def get_prediction(self, request):
if not request.json:
......@@ -64,12 +81,12 @@ class WebService(object):
feed, fetch = self.preprocess(request.json, request.json["fetch"])
if isinstance(feed, dict) and "fetch" in feed:
del feed["fetch"]
fetch_map = self.client_service.predict(feed=feed, fetch=fetch)
for key in fetch_map:
fetch_map[key] = fetch_map[key][0].tolist()
result = self.postprocess(
fetch_map = self.client.predict(feed=feed, fetch=fetch)
fetch_map = self.postprocess(
feed=request.json, fetch=fetch, fetch_map=fetch_map)
result = {"result": result}
for key in fetch_map:
fetch_map[key] = fetch_map[key].tolist()
result = {"result": fetch_map}
except ValueError:
result = {"result": "Request Value Error"}
return result
......@@ -83,6 +100,24 @@ class WebService(object):
p_rpc = Process(target=self._launch_rpc_service)
p_rpc.start()
def run_flask(self):
app_instance = Flask(__name__)
@app_instance.before_first_request
def init():
self._launch_web_service()
service_name = "/" + self.name + "/prediction"
@app_instance.route(service_name, methods=["POST"])
def run():
return self.get_prediction(request)
app_instance.run(host="0.0.0.0",
port=self.port,
threaded=False,
processes=4)
def preprocess(self, feed={}, fetch=[]):
return feed, fetch
......
......@@ -14,14 +14,15 @@
# pylint: disable=doc-string-missing
from flask import Flask, request, abort
from paddle_serving_server_gpu import OpMaker, OpSeqMaker, Server
import paddle_serving_server_gpu as serving
from contextlib import closing
from multiprocessing import Pool, Process, Queue
from paddle_serving_client import Client
from paddle_serving_server_gpu import OpMaker, OpSeqMaker, Server
from paddle_serving_server_gpu.serve import start_multi_card
import socket
import sys
import numpy as np
import paddle_serving_server_gpu as serving
class WebService(object):
......@@ -67,22 +68,39 @@ class WebService(object):
def _launch_rpc_service(self, service_idx):
self.rpc_service_list[service_idx].run_server()
def port_is_available(self, port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('0.0.0.0', port))
if result != 0:
return True
else:
return False
def prepare_server(self, workdir="", port=9393, device="gpu", gpuid=0):
self.workdir = workdir
self.port = port
self.device = device
self.gpuid = gpuid
self.port_list = []
default_port = 12000
for i in range(1000):
if self.port_is_available(default_port + i):
self.port_list.append(default_port + i)
if len(self.port_list) > len(self.gpus):
break
if len(self.gpus) == 0:
# init cpu service
self.rpc_service_list.append(
self.default_rpc_service(
self.workdir, self.port + 1, -1, thread_num=10))
self.workdir, self.port_list[0], -1, thread_num=10))
else:
for i, gpuid in enumerate(self.gpus):
self.rpc_service_list.append(
self.default_rpc_service(
"{}_{}".format(self.workdir, i),
self.port + 1 + i,
self.port_list[i],
gpuid,
thread_num=10))
......@@ -94,9 +112,9 @@ class WebService(object):
endpoints = ""
if gpu_num > 0:
for i in range(gpu_num):
endpoints += "127.0.0.1:{},".format(self.port + i + 1)
endpoints += "127.0.0.1:{},".format(self.port_list[i])
else:
endpoints = "127.0.0.1:{}".format(self.port + 1)
endpoints = "127.0.0.1:{}".format(self.port_list[0])
self.client.connect([endpoints])
def get_prediction(self, request):
......@@ -109,11 +127,11 @@ class WebService(object):
if isinstance(feed, dict) and "fetch" in feed:
del feed["fetch"]
fetch_map = self.client.predict(feed=feed, fetch=fetch)
for key in fetch_map:
fetch_map[key] = fetch_map[key][0].tolist()
result = self.postprocess(
fetch_map = self.postprocess(
feed=request.json, fetch=fetch, fetch_map=fetch_map)
result = {"result": result}
for key in fetch_map:
fetch_map[key] = fetch_map[key].tolist()
result = {"result": fetch_map}
except ValueError:
result = {"result": "Request Value Error"}
return result
......@@ -131,6 +149,24 @@ class WebService(object):
for p in server_pros:
p.start()
def run_flask(self):
app_instance = Flask(__name__)
@app_instance.before_first_request
def init():
self._launch_web_service()
service_name = "/" + self.name + "/prediction"
@app_instance.route(service_name, methods=["POST"])
def run():
return self.get_prediction(request)
app_instance.run(host="0.0.0.0",
port=self.port,
threaded=False,
processes=4)
def preprocess(self, feed={}, fetch=[]):
return feed, fetch
......
......@@ -47,7 +47,8 @@ REQUIRED_PACKAGES = [
packages=['paddle_serving_app',
'paddle_serving_app.reader',
'paddle_serving_app.utils']
'paddle_serving_app.utils',
'paddle_serving_app.reader.pddet']
package_data={}
package_dir={'paddle_serving_app':
......@@ -55,7 +56,9 @@ package_dir={'paddle_serving_app':
'paddle_serving_app.reader':
'${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_app/reader',
'paddle_serving_app.utils':
'${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_app/utils',}
'${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_app/utils',
'paddle_serving_app.reader.pddet':
'${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_app/reader/pddet',}
setup(
name='paddle-serving-app',
......
......@@ -323,6 +323,9 @@ function python_test_bert() {
echo "bert RPC inference pass"
;;
*)
echo "error type"
exit 1
;;
esac
echo "test bert $TYPE finished as expected."
unset SERVING_BIN
......@@ -357,6 +360,9 @@ function python_test_imdb() {
echo "imdb ignore GPU test"
;;
*)
echo "error type"
exit 1
;;
esac
echo "test imdb $TYPE finished as expected."
unset SERVING_BIN
......@@ -389,6 +395,9 @@ function python_test_lac() {
echo "lac ignore GPU test"
;;
*)
echo "error type"
exit 1
;;
esac
echo "test lac $TYPE finished as expected."
unset SERVING_BIN
......@@ -408,6 +417,248 @@ function python_run_test() {
cd ../.. # pwd: /Serving
}
function monitor_test() {
local TYPE=$1 # pwd: /Serving
mkdir _monitor_test && cd _monitor_test # pwd: /Serving/_monitor_test
case $TYPE in
CPU):
pip install pyftpdlib
mkdir remote_path
mkdir local_path
cd remote_path # pwd: /Serving/_monitor_test/remote_path
check_cmd "python -m pyftpdlib -p 8000 &>/dev/null &"
cd .. # pwd: /Serving/_monitor_test
# type: ftp
# remote_path: /
# remote_model_name: uci_housing.tar.gz
# local_tmp_path: ___tmp
# local_path: local_path
cd remote_path # pwd: /Serving/_monitor_test/remote_path
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
touch donefile
cd .. # pwd: /Serving/_monitor_test
mkdir -p local_path/uci_housing_model
python -m paddle_serving_server.monitor \
--type='ftp' --ftp_host='127.0.0.1' --ftp_port='8000' \
--remote_path='/' --remote_model_name='uci_housing.tar.gz' \
--remote_donefile_name='donefile' --local_path='local_path' \
--local_model_name='uci_housing_model' --local_timestamp_file='fluid_time_file' \
--local_tmp_path='___tmp' --unpacked_filename='uci_housing_model' \
--interval='1' >/dev/null &
sleep 10
if [ ! -f "local_path/uci_housing_model/fluid_time_file" ]; then
echo "local_path/uci_housing_model/fluid_time_file not exist."
exit 1
fi
ps -ef | grep "monitor" | grep -v grep | awk '{print $2}' | xargs kill
rm -rf remote_path/*
rm -rf local_path/*
# type: ftp
# remote_path: /tmp_dir
# remote_model_name: uci_housing_model
# local_tmp_path: ___tmp
# local_path: local_path
mkdir -p remote_path/tmp_dir && cd remote_path/tmp_dir # pwd: /Serving/_monitor_test/remote_path/tmp_dir
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
tar -xzf uci_housing.tar.gz
touch donefile
cd ../.. # pwd: /Serving/_monitor_test
mkdir -p local_path/uci_housing_model
python -m paddle_serving_server.monitor \
--type='ftp' --ftp_host='127.0.0.1' --ftp_port='8000' \
--remote_path='/tmp_dir' --remote_model_name='uci_housing_model' \
--remote_donefile_name='donefile' --local_path='local_path' \
--local_model_name='uci_housing_model' --local_timestamp_file='fluid_time_file' \
--local_tmp_path='___tmp' --interval='1' >/dev/null &
sleep 10
if [ ! -f "local_path/uci_housing_model/fluid_time_file" ]; then
echo "local_path/uci_housing_model/fluid_time_file not exist."
exit 1
fi
ps -ef | grep "monitor" | grep -v grep | awk '{print $2}' | xargs kill
rm -rf remote_path/*
rm -rf local_path/*
# type: general
# remote_path: /
# remote_model_name: uci_housing.tar.gz
# local_tmp_path: ___tmp
# local_path: local_path
cd remote_path # pwd: /Serving/_monitor_test/remote_path
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
touch donefile
cd .. # pwd: /Serving/_monitor_test
mkdir -p local_path/uci_housing_model
python -m paddle_serving_server.monitor \
--type='general' --general_host='ftp://127.0.0.1:8000' \
--remote_path='/' --remote_model_name='uci_housing.tar.gz' \
--remote_donefile_name='donefile' --local_path='local_path' \
--local_model_name='uci_housing_model' --local_timestamp_file='fluid_time_file' \
--local_tmp_path='___tmp' --unpacked_filename='uci_housing_model' \
--interval='1' >/dev/null &
sleep 10
if [ ! -f "local_path/uci_housing_model/fluid_time_file" ]; then
echo "local_path/uci_housing_model/fluid_time_file not exist."
exit 1
fi
ps -ef | grep "monitor" | grep -v grep | awk '{print $2}' | xargs kill
rm -rf remote_path/*
rm -rf local_path/*
# type: general
# remote_path: /tmp_dir
# remote_model_name: uci_housing_model
# local_tmp_path: ___tmp
# local_path: local_path
mkdir -p remote_path/tmp_dir && cd remote_path/tmp_dir # pwd: /Serving/_monitor_test/remote_path/tmp_dir
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
tar -xzf uci_housing.tar.gz
touch donefile
cd ../.. # pwd: /Serving/_monitor_test
mkdir -p local_path/uci_housing_model
python -m paddle_serving_server.monitor \
--type='general' --general_host='ftp://127.0.0.1:8000' \
--remote_path='/tmp_dir' --remote_model_name='uci_housing_model' \
--remote_donefile_name='donefile' --local_path='local_path' \
--local_model_name='uci_housing_model' --local_timestamp_file='fluid_time_file' \
--local_tmp_path='___tmp' --interval='1' >/dev/null &
sleep 10
if [ ! -f "local_path/uci_housing_model/fluid_time_file" ]; then
echo "local_path/uci_housing_model/fluid_time_file not exist."
exit 1
fi
ps -ef | grep "monitor" | grep -v grep | awk '{print $2}' | xargs kill
rm -rf remote_path/*
rm -rf local_path/*
ps -ef | grep "pyftpdlib" | grep -v grep | awk '{print $2}' | xargs kill
;;
GPU):
pip install pyftpdlib
mkdir remote_path
mkdir local_path
cd remote_path # pwd: /Serving/_monitor_test/remote_path
check_cmd "python -m pyftpdlib -p 8000 &>/dev/null &"
cd .. # pwd: /Serving/_monitor_test
# type: ftp
# remote_path: /
# remote_model_name: uci_housing.tar.gz
# local_tmp_path: ___tmp
# local_path: local_path
cd remote_path # pwd: /Serving/_monitor_test/remote_path
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
touch donefile
cd .. # pwd: /Serving/_monitor_test
mkdir -p local_path/uci_housing_model
python -m paddle_serving_server_gpu.monitor \
--type='ftp' --ftp_host='127.0.0.1' --ftp_port='8000' \
--remote_path='/' --remote_model_name='uci_housing.tar.gz' \
--remote_donefile_name='donefile' --local_path='local_path' \
--local_model_name='uci_housing_model' --local_timestamp_file='fluid_time_file' \
--local_tmp_path='___tmp' --unpacked_filename='uci_housing_model' \
--interval='1' >/dev/null &
sleep 10
if [ ! -f "local_path/uci_housing_model/fluid_time_file" ]; then
echo "local_path/uci_housing_model/fluid_time_file not exist."
exit 1
fi
ps -ef | grep "monitor" | grep -v grep | awk '{print $2}' | xargs kill
rm -rf remote_path/*
rm -rf local_path/*
# type: ftp
# remote_path: /tmp_dir
# remote_model_name: uci_housing_model
# local_tmp_path: ___tmp
# local_path: local_path
mkdir -p remote_path/tmp_dir && cd remote_path/tmp_dir # pwd: /Serving/_monitor_test/remote_path/tmp_dir
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
tar -xzf uci_housing.tar.gz
touch donefile
cd ../.. # pwd: /Serving/_monitor_test
mkdir -p local_path/uci_housing_model
python -m paddle_serving_server_gpu.monitor \
--type='ftp' --ftp_host='127.0.0.1' --ftp_port='8000' \
--remote_path='/tmp_dir' --remote_model_name='uci_housing_model' \
--remote_donefile_name='donefile' --local_path='local_path' \
--local_model_name='uci_housing_model' --local_timestamp_file='fluid_time_file' \
--local_tmp_path='___tmp' --interval='1' >/dev/null &
sleep 10
if [ ! -f "local_path/uci_housing_model/fluid_time_file" ]; then
echo "local_path/uci_housing_model/fluid_time_file not exist."
exit 1
fi
ps -ef | grep "monitor" | grep -v grep | awk '{print $2}' | xargs kill
rm -rf remote_path/*
rm -rf local_path/*
# type: general
# remote_path: /
# remote_model_name: uci_housing.tar.gz
# local_tmp_path: ___tmp
# local_path: local_path
cd remote_path # pwd: /Serving/_monitor_test/remote_path
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
touch donefile
cd .. # pwd: /Serving/_monitor_test
mkdir -p local_path/uci_housing_model
python -m paddle_serving_server_gpu.monitor \
--type='general' --general_host='ftp://127.0.0.1:8000' \
--remote_path='/' --remote_model_name='uci_housing.tar.gz' \
--remote_donefile_name='donefile' --local_path='local_path' \
--local_model_name='uci_housing_model' --local_timestamp_file='fluid_time_file' \
--local_tmp_path='___tmp' --unpacked_filename='uci_housing_model' \
--interval='1' >/dev/null &
sleep 10
if [ ! -f "local_path/uci_housing_model/fluid_time_file" ]; then
echo "local_path/uci_housing_model/fluid_time_file not exist."
exit 1
fi
ps -ef | grep "monitor" | grep -v grep | awk '{print $2}' | xargs kill
rm -rf remote_path/*
rm -rf local_path/*
# type: general
# remote_path: /tmp_dir
# remote_model_name: uci_housing_model
# local_tmp_path: ___tmp
# local_path: local_path
mkdir -p remote_path/tmp_dir && cd remote_path/tmp_dir # pwd: /Serving/_monitor_test/remote_path/tmp_dir
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
tar -xzf uci_housing.tar.gz
touch donefile
cd ../.. # pwd: /Serving/_monitor_test
mkdir -p local_path/uci_housing_model
python -m paddle_serving_server_gpu.monitor \
--type='general' --general_host='ftp://127.0.0.1:8000' \
--remote_path='/tmp_dir' --remote_model_name='uci_housing_model' \
--remote_donefile_name='donefile' --local_path='local_path' \
--local_model_name='uci_housing_model' --local_timestamp_file='fluid_time_file' \
--local_tmp_path='___tmp' --interval='1' >/dev/null &
sleep 10
if [ ! -f "local_path/uci_housing_model/fluid_time_file" ]; then
echo "local_path/uci_housing_model/fluid_time_file not exist."
exit 1
fi
ps -ef | grep "monitor" | grep -v grep | awk '{print $2}' | xargs kill
rm -rf remote_path/*
rm -rf local_path/*
ps -ef | grep "pyftpdlib" | grep -v grep | awk '{print $2}' | xargs kill
;;
*)
echo "error type"
exit 1
;;
esac
cd .. # pwd: /Serving
rm -rf _monitor_test
echo "test monitor $TYPE finished as expected."
}
function main() {
local TYPE=$1 # pwd: /
init # pwd: /Serving
......@@ -415,6 +666,7 @@ function main() {
build_server $TYPE # pwd: /Serving
build_app $TYPE # pwd: /Serving
python_run_test $TYPE # pwd: /Serving
monitor_test $TYPE # pwd: /Serving
echo "serving $TYPE part finished as expected."
}
......