+ const T* get_data() const;
+
+ // ---------------- Other base class members ----------------
+
+ int init(Bus* bus,
+ Dag* dag,
+ uint32_t id,
+ const std::string& name,
+ const std::string& type,
+ void* conf);
+
+ int deinit();
+
+
+ int process(bool debug);
+
+ // Get the input object
+ const google::protobuf::Message* get_request_message();
+
+ const std::string& type() const;
+
+ uint32_t id() const;
+
+ // ------------------ OP Interface -------------------
+
+ // Get the derived Channel object of current OP
+ virtual Channel* mutable_channel() = 0;
+
+ // Get the derived Channel object of current OP
+ virtual const Channel* get_channel() const = 0;
+
+ // Release the derived Channel object of current OP
+ virtual int release_channel() = 0;
+
+ // Inference interface
+ virtual int inference() = 0;
+
+ // ------------------ Conf Interface -------------------
+ virtual void* create_config(const configure::DAGNode& conf) { return NULL; }
+
+ virtual void delete_config(void* conf) {}
+
+ virtual void set_config(void* conf) { return; }
+
+ // ------------------ Metric Interface -------------------
+ virtual void regist_metric() { return; }
+};
+
+```
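+
+To make the OP interface above concrete, the following is a minimal sketch of a user-defined OP. It assumes that deriving from the `OpWithChannel<GeneralBlob>` helper together with the `DECLARE_OP`/`DEFINE_OP` macros (as the built-in `GeneralInferOp` does) supplies the `mutable_channel`/`get_channel`/`release_channel` overrides, so only `inference()` has to be written by hand; `MyScoreOp` itself is purely illustrative and not part of the framework.
+
+```C++
+// Illustrative sketch only: MyScoreOp is not a framework class.
+class MyScoreOp
+    : public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
+ public:
+  DECLARE_OP(MyScoreOp);
+
+  // The inference interface declared above; called once per request.
+  int inference() {
+    // Read the output of the predecessor OP by its name.
+    const GeneralBlob* input = get_depend_argument<GeneralBlob>(pre_name());
+    if (input == NULL) {
+      return -1;  // missing dependency
+    }
+    // Obtain this OP's own output blob and fill it.
+    GeneralBlob* output = mutable_data<GeneralBlob>();
+    output->SetBatchSize(input->GetBatchSize());
+    // ... populate output->tensor_vector from input->tensor_vector ...
+    return 0;
+  }
+};
+DEFINE_OP(MyScoreOp);
+```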
+
+### 5.4 框架相关接口
+
+Service
+
+```C++
+class InferService {
+ public:
+ static const char* tag() { return "service"; }
+ int init(const configure::InferService& conf);
+ int deinit() { return 0; }
+ int reload();
+ const std::string& name() const;
+ const std::string& full_name() const { return _infer_service_format; }
+
+ // Execute each workflow serially
+ virtual int inference(const google::protobuf::Message* request,
+ google::protobuf::Message* response,
+ butil::IOBufBuilder* debug_os = NULL);
+
+ int debug(const google::protobuf::Message* request,
+ google::protobuf::Message* response,
+ butil::IOBufBuilder* debug_os);
+
+};
+
+class ParallelInferService : public InferService {
+ public:
+ // Execute workflows in parallel
+ int inference(const google::protobuf::Message* request,
+ google::protobuf::Message* response,
+ butil::IOBufBuilder* debug_os) {
+ return 0;
+ }
+};
+```
+ServerManager
+
+```C++
+class ServerManager {
+ public:
+ typedef google::protobuf::Service Service;
+ ServerManager();
+
+ static ServerManager& instance() {
+ static ServerManager server;
+ return server;
+ }
+ static bool reload_starting() { return _s_reload_starting; }
+ static void stop_reloader() { _s_reload_starting = false; }
+ int add_service_by_format(const std::string& format);
+ int start_and_wait();
+};
+```
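+
+The snippet below is a hedged sketch of how a server process might use `ServerManager`, based only on the members listed above. The format name passed to `add_service_by_format` is illustrative (the `InferService::tag()` value is used here); the actual format strings depend on how services are registered in a given build.
+
+```C++
+// Illustrative startup sequence; error handling is minimal.
+int start_serving() {
+  ServerManager& manager = ServerManager::instance();
+  // Register the service implementation published under this format name.
+  if (manager.add_service_by_format(InferService::tag()) != 0) {
+    return -1;
+  }
+  // Blocks the calling thread until the server is shut down.
+  return manager.start_and_wait();
+}
+```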
+
+DAG
+
+```C++
+class Dag {
+ public:
+ EdgeMode parse_mode(std::string& mode); // NOLINT
+
+ int init(const char* path, const char* file, const std::string& name);
+
+ int init(const configure::Workflow& conf, const std::string& name);
+
+ int deinit();
+
+ uint32_t nodes_size();
+
+ const DagNode* node_by_id(uint32_t id);
+
+ const DagNode* node_by_id(uint32_t id) const;
+
+ const DagNode* node_by_name(std::string& name); // NOLINT
+
+ const DagNode* node_by_name(const std::string& name) const;
+
+ uint32_t stage_size();
+
+ const DagStage* stage_by_index(uint32_t index);
+
+ const std::string& name() const { return _dag_name; }
+
+ const std::string& full_name() const { return _dag_name; }
+
+ void regist_metric(const std::string& service_name);
+};
+```
+
+Workflow
+
+```C++
+class Workflow {
+ public:
+ Workflow() {}
+ static const char* tag() { return "workflow"; }
+
+ // Each workflow object corresponds to an independent
+ // configure file, so you can share the object between
+ // different apps.
+ int init(const configure::Workflow& conf);
+
+ DagView* fetch_dag_view(const std::string& service_name);
+
+ int deinit() { return 0; }
+
+ void return_dag_view(DagView* view);
+
+ int reload();
+
+ const std::string& name() { return _name; }
+
+ const std::string& full_name() { return _name; }
+};
+```
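+
+As a rough usage sketch based only on the interfaces listed above, a service obtains a `DagView` from a `Workflow` for the current request, runs it, and hands it back so the view can be reused; the actual execution API on `DagView` is not shown here and is therefore omitted.
+
+```C++
+// Illustrative sketch only; the DagView execution call is elided because it
+// is not part of the interfaces listed above.
+int run_once(Workflow* workflow, const std::string& service_name) {
+  DagView* view = workflow->fetch_dag_view(service_name);
+  if (view == NULL) {
+    return -1;
+  }
+  // ... execute the stages of the view against the current request ...
+  workflow->return_dag_view(view);  // give the view back for reuse
+  return 0;
+}
+```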
diff --git a/doc/DESIGN_DOC.md b/doc/DESIGN_DOC.md
index 312379cd7543e70095e5a6d8168aab06b79a0525..2f8a36ea6686b5add2a7e4e407eabfd14167490d 100644
--- a/doc/DESIGN_DOC.md
+++ b/doc/DESIGN_DOC.md
@@ -1,30 +1,32 @@
-# Paddle Serving设计文档
+# Paddle Serving Design Doc
-## 1. 整体设计目标
+## 1. Design Objectives
-- 长期使命:Paddle Serving是一个PaddlePaddle开源的在线服务框架,长期目标就是围绕着人工智能落地的最后一公里提供越来越专业、可靠、易用的服务。
+- Long Term Vision: Online deployment of deep learning models is becoming a core part of user-facing applications, and every AI developer eventually faces the problem of turning a trained model into an online service.
+Paddle Serving is PaddlePaddle's official open-source online deployment framework. Its long-term goal is to provide increasingly professional, reliable and easy-to-use online serving for the last mile of AI applications.
-- 工业级:为了达到工业级深度学习模型在线部署的要求,
-Paddle Serving提供很多大规模场景需要的部署功能:1)分布式稀疏参数索引功能;2)高并发底层通信能力;3)模型管理、在线A/B流量测试、模型热加载。
+- Easy-To-Use: To let algorithm developers deploy their models online quickly, Paddle Serving provides APIs that work seamlessly with Paddle's training process, and most Paddle models can be deployed as a service with a single command.
-- 简单易用:为了让使用Paddle的用户能够以极低的成本部署模型,PaddleServing设计了一套与Paddle训练框架无缝打通的预测部署API,普通模型可以使用一行命令进行服务部署。
+- Industrial Oriented: To meet industrial deployment requirements, Paddle Serving provides many capabilities needed by large-scale scenarios: 1) distributed sparse embedding indexing; 2) highly concurrent underlying communication; 3) model management, online A/B testing and model hot loading.
-- 功能扩展:当前,Paddle Serving支持C++、Python、Golang的客户端,未来也会面向不同类型的客户新增多种语言的客户端。在Paddle Serving的框架设计方面,尽管当前Paddle Serving以支持Paddle模型的部署为核心功能,
-用户可以很容易嵌入其他的机器学习库部署在线预测。
+- Extensibility: Paddle Serving currently provides C++, Python and Golang clients, and clients for more languages will be added. Although the Paddle inference library is currently the only officially supported inference backend, it is easy to extend Paddle Serving to other machine learning inference libraries.
-## 2. 模块设计与实现
-### 2.1 Python API接口设计
+## 2. Module design and implementation
+
+### 2.1 Python API interface design
+
+#### 2.1.1 save a servable model
+Serving a Paddle model requires three things: 1) the input variables of the model; 2) the output variables of the model; 3) the model structure and parameters. The Paddle Serving Python API provides a `save_model` interface that saves a trained model together with the configuration Paddle Serving needs during deployment. An example is as follows:
-#### 2.1.1 训练模型的保存
-Paddle的模型预测需要重点关注的内容:1)模型的输入变量;2)模型的输出变量;3)模型结构和模型参数。Paddle Serving Python API提供用户可以在训练过程中保存模型的接口,并将Paddle Serving在部署阶段需要保存的配置打包保存,一个示例如下:
``` python
import paddle_serving_client.io as serving_io
serving_io.save_model("serving_model", "client_conf",
{"words": data}, {"prediction": prediction},
fluid.default_main_program())
```
-代码示例中,`{"words": data}`和`{"prediction": prediction}`分别指定了模型的输入和输出,`"words"`和`"prediction"`是输出和输出变量的别名,设计别名的目的是为了使开发者能够记忆自己训练模型的输入输出对应的字段。`data`和`prediction`则是Paddle训练过程中的`[Variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#variable)`,通常代表张量([Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Tensor_cn.html#tensor))或变长张量([LodTensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor))。调用保存命令后,会按照用户指定的`"serving_model"`和`"client_conf"`生成两个目录,内容如下:
+In the example, `{"words": data}` and `{"prediction": prediction}` specify the inputs and outputs of the model, and `"words"` and `"prediction"` are their alias names. Aliases help developers remember which fields correspond to the inputs and outputs of their trained model. `data` and `prediction` are Paddle [`Variable`](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#variable) objects from the training phase, usually representing a [Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Tensor_cn.html#tensor) or a [LodTensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor). When `save_model` is called, two directories named `"serving_model"` and `"client_conf"` are generated with the following content:
+
``` shell
.
├── client_conf
@@ -44,11 +46,11 @@ serving_io.save_model("serving_model", "client_conf",
├── serving_server_conf.prototxt
└── serving_server_conf.stream.prototxt
```
-其中,`"serving_client_conf.prototxt"`和`"serving_server_conf.prototxt"`是Paddle Serving的Client和Server端需要加载的配置,`"serving_client_conf.stream.prototxt"`和`"serving_server_conf.stream.prototxt"`是配置文件的二进制形式。`"serving_model"`下保存的其他内容和Paddle保存的模型文件是一致的。我们会考虑未来在Paddle框架中直接保存可服务的配置,实现配置保存对用户无感。
+`"serving_client_conf.prototxt"` and `"serving_server_conf.prototxt"` are the client side and the server side configurations of Paddle Serving, and `"serving_client_conf.stream.prototxt"` and `"serving_server_conf.stream.prototxt"` are the corresponding parts. Other contents saved in the directory are the same as Paddle saved inference model. We are considering to support `save_model` interface in Paddle training framework so that a user is not aware of the servable configurations.
-#### 2.1.2 服务端模型加载
+#### 2.1.2 Model loading on the server side
-服务端的预测逻辑可以通过Paddle Serving Server端的API进行人工定义,一个例子:
+The server-side inference logic can be defined through the Paddle Serving Server API with a few lines of code. An example is as follows:
``` python
import paddle_serving_server as serving
op_maker = serving.OpMaker()
@@ -63,41 +65,42 @@ op_seq_maker.add_op(dist_kv_op)
op_seq_maker.add_op(general_infer_op)
op_seq_maker.add_op(general_response_op)
```
-
-当前Paddle Serving在Server端支持的主要Op请参考如下列表:
+The server-side operators currently supported by Paddle Serving are listed below:
-| Op 名称 | 描述 |
+| Op Name | Description |
|--------------|------|
-| `general_reader` | 通用数据格式的读取Op |
-| `genreal_infer` | 通用数据格式的Paddle预测Op |
-| `general_response` | 通用数据格式的响应Op |
-| `general_dist_kv` | 分布式索引Op |
+| `general_reader` | General Data Reading Operator |
+| `general_infer` | General Data Format Paddle Inference Operator |
+| `general_response` | General Data Response Operator |
+| `general_dist_kv` | Distributed Sparse Embedding Indexing |
-当前Paddle Serving中的预估引擎支持在CPU/GPU上进行预测,对应的预测服务安装包以及镜像也有两个。但无论是CPU上进行模型预估还是GPU上进行模型预估,普通模型的预测都可用一行命令进行启动。
+Paddle Serving supports inference engines on multiple devices; CPU and GPU engines are currently supported, and official Docker images are provided for both. Whether on CPU or GPU, a common model can be served with a single command.
+
``` shell
python -m paddle_serving_server.serve --model your_servable_model --thread 10 --port 9292
```
``` shell
python -m paddle_serving_server_gpu.serve --model your_servable_model --thread 10 --port 9292
```
-启动命令的选项列表如下:
+
+The options of the startup command are listed below:
-| 参数 | 类型 | 默认值 | 描述 |
+| Arguments | Types | Defaults | Descriptions |
|--------------|------|-----------|--------------------------------|
-| `thread` | int | `4` | 服务端的并发数,通常与CPU核数一致即可 |
-| `port` | int | `9292` | 服务暴露给用户的端口 |
-| `name` | str | `""` | 服务名称,当用户指定时代表直接启动的是HTTP服务 |
-| `model` | str | `""` | 服务端模型文件夹路径 |
-| `gpu_ids` | str | `""` | 仅在paddle_serving_server_gpu中可以使用,功能与CUDA_VISIBLE_DEVICES一致 |
+| `thread` | int | `4` | Number of concurrent threads on the server side, usually set to the number of CPU cores |
+| `port` | int | `9292` | Port exposed to users |
+| `name` | str | `""` | Service name; if specified, an HTTP service is started directly |
+| `model` | str | `""` | Path to the servable model directory |
+| `gpu_ids` | str | `""` | Available only in paddle_serving_server_gpu; works like CUDA_VISIBLE_DEVICES |
-举例`python -m paddle_serving_server.serve --model your_servable_model --thread 10 --port 9292`对应到具体的Server端具体配置如下
+For example, `python -m paddle_serving_server.serve --model your_servable_model --thread 10 --port 9292` is equivalent to the following code that a user could write explicitly:
``` python
from paddle_serving_server import OpMaker, OpSeqMaker, Server
@@ -117,55 +120,57 @@ server.prepare_server(port=9292, device="cpu")
server.run_server()
```
-#### 2.1.3 客户端访问API
-Paddle Serving支持远程服务访问的协议一种是基于RPC,另一种是HTTP。用户通过RPC访问,可以使用Paddle Serving提供的Python Client API,通过定制输入数据的格式来实现服务访问。下面的例子解释Paddle Serving Client如何定义输入数据。保存可部署模型时需要指定每个输入的别名,例如`sparse`和`dense`,对应的数据可以是离散的ID序列`[1, 1001, 100001]`,也可以是稠密的向量`[0.2, 0.5, 0.1, 0.4, 0.11, 0.22]`。当前Client的设计,对于离散的ID序列,支持Paddle中的`lod_level=0`和`lod_level=1`的情况,即张量以及一维变长张量。对于稠密的向量,支持`N-D Tensor`。用户不需要显式指定输入数据的形状,Paddle Serving的Client API会通过保存配置时记录的输入形状进行对应的检查。
+#### 2.1.3 Paddle Serving Client API
+Paddle Serving supports remote service access through RPC (remote procedure call) and HTTP. RPC access goes through the Python Client API of Paddle Serving, and a user can define a data preprocessing function before calling it. The example below shows how to define the input data of a Paddle Serving client. The servable model has two inputs with the alias names `sparse` and `dense`: `sparse` corresponds to sparse sequence ids such as `[1, 1001, 100001]` and `dense` corresponds to a dense vector such as `[0.2, 0.5, 0.1, 0.4, 0.11, 0.22]`. For sparse sequence data, the current design supports `lod_level=0` and `lod_level=1` in Paddle, i.e. `Tensor` and one-level `LodTensor`. For dense data, any `N-D Tensor` is supported. Users do not need to specify the shape of the model inputs explicitly; the Paddle Serving Client API checks the input data against the shapes recorded in the servable configuration.
+
``` python
feed_dict["sparse"] = [1, 1001, 100001]
feed_dict["dense"] = [0.2, 0.5, 0.1, 0.4, 0.11, 0.22]
fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
```
-Client链接Server的代码,通常只需要加载保存模型时保存的Client端配置,以及指定要去访问的服务端点即可。为了保持内部访问进行数据并行的扩展能力,Paddle Serving Client允许定义多个服务端点。
+
+The following code sample shows how a Paddle Serving client connects to the server: it only needs to load the client configuration saved with the model and specify the endpoints to access. To allow data-parallel scaling during prediction, Paddle Serving Client lets users define multiple server endpoints.
``` python
client = Client()
client.load_client_config('servable_client_configs')
client.connect(["127.0.0.1:9292"])
```
+### 2.2 Underlying Communication Mechanism
+Paddle Serving adopts [baidu-rpc](https://github.com/apache/incubator-brpc) as its underlying communication layer. baidu-rpc is an open-source RPC library featuring high concurrency and low latency compared with other open-source RPC libraries; millions of instances and thousands of services use it within Baidu, so it is stable and reliable.
-### 2.2 底层通信机制
-Paddle Serving采用[baidu-rpc](https://github.com/apache/incubator-brpc)进行底层的通信。baidu-rpc是百度开源的一款PRC通信库,具有高并发、低延时等特点,已经支持了包括百度在内上百万在线预估实例、上千个在线预估服务,稳定可靠。
+### 2.3 Core Execution Engine
+The core execution engine of Paddle Serving is a directed acyclic graph (DAG). Each node in the DAG represents one phase of the inference service, such as Paddle inference, data preprocessing or data postprocessing. The DAG lets independent nodes run concurrently and make full use of the computing resources of a deployment instance, reducing latency. For example, when the same input must be fed into two different models and their scores combined, the two model-scoring steps can run in parallel through the DAG topology.
-### 2.3 核心执行引擎
-Paddle Serving的核心执行引擎是一个有向无环图,图中的每个节点代表预估服务的一个环节,例如计算模型预测打分就是其中一个环节。有向无环图有利于可并发节点充分利用部署实例内的计算资源,缩短延时。一个例子,当同一份输入需要送入两个不同的模型进行预估,并将两个模型预估的打分进行加权求和时,两个模型的打分过程即可以通过有向无环图的拓扑关系并发。
-### 2.4 微服务插件模式
-由于Paddle Serving底层采用基于C++的通信组件,并且核心框架也是基于C/C++编写,当用户想要在服务端定义复杂的前处理与后处理逻辑时,一种办法是修改Paddle Serving底层框架,重新编译源码。另一种方式可以通过在服务端嵌入轻量级的Web服务,通过在Web服务中实现更复杂的预处理逻辑,从而搭建一套逻辑完整的服务。当访问量超过了Web服务能够接受的范围,开发者有足够的理由开发一些高性能的C++预处理逻辑,并嵌入到Serving的原生服务库中。Web服务和RPC服务的关系以及他们的组合方式可以参考下文`用户类型`中的说明。
+### 2.4 Micro service plugin
+Both the underlying communication layer and the core framework of Paddle Serving are written in C++, so it is hard for users unfamiliar with C++ to implement new server-side operators. An alternative is the lightweight Web Service embedded in Paddle Serving Server, which can be viewed as a plugin: users can implement complex data preprocessing and postprocessing logic there to build a complete AI service. When the traffic of such a service outgrows what the Web Service can handle, it is worth implementing the logic as high-performance C++ operators in Serving's native service library. The relationship between the Web Service and the RPC service, and how they can be combined, is described in `User Types` below.
-## 3. 工业级特性
+## 3. Industrial Features
-### 3.1 分布式稀疏参数索引
+### 3.1 Distributed Sparse Parameter Indexing
-分布式稀疏参数索引通常在广告推荐中出现,并与分布式训练配合形成完整的离线-在线一体化部署。下图解释了其中的流程,产品的在线服务接受用户请求后将请求发送给预估服务,同时系统会记录用户的请求以进行相应的训练日志处理和拼接。离线分布式训练系统会针对流式产出的训练日志进行模型增量训练,而增量产生的模型会配送至分布式稀疏参数索引服务,同时对应的稠密的模型参数也会配送至在线的预估服务。在线服务由两部分组成,一部分是针对用户的请求提取特征后,将需要进行模型的稀疏参数索引的特征发送请求给分布式稀疏参数索引服务,针对分布式稀疏参数索引服务返回的稀疏参数再进行后续深度学习模型的计算流程,从而完成预估。
+Distributed sparse parameter indexing is commonly seen in advertising and recommendation scenarios, and is usually deployed together with distributed training to form a complete offline-online pipeline. The figure below shows a typical online recommendation architecture. When the recommendation service receives a user request, the request is sent to Paddle Serving Server, and the system also records it as a training log for the offline distributed training system. For sparse features, the distributed sparse parameter indexing service is called to look up the sparse parameters. The dense input features, together with the looked-up sparse parameters, are fed into the Paddle inference node of the DAG in Paddle Serving Server, and the resulting score is returned through RPC to the product service for item ranking.
-
-为什么要使用Paddle Serving提供的分布式稀疏参数索引服务?1)在一些推荐场景中,模型的输入特征规模通常可以达到上千亿,单台机器无法支撑T级别模型在内存的保存,因此需要进行分布式存储。2)Paddle Serving提供的分布式稀疏参数索引服务,具有并发请求多个节点的能力,从而以较低的延时完成预估服务。
+
+Why does Paddle Serving need to support distributed sparse parameter indexing? 1) In some recommendation scenarios, the number of input features can reach hundreds of billions, so a single node cannot hold the parameters in memory and distributed storage is required. 2) The distributed sparse parameter indexing service provided by Paddle Serving can query multiple nodes concurrently, keeping inference latency low without extra work from users.
-### 3.2 模型管理、在线A/B流量测试、模型热加载
+### 3.2 Model Management, Online A/B Test, and Model Hot Loading
-Paddle Serving的C++引擎支持模型管理、在线A/B流量测试、模型热加载等功能,当前在Python API还有没完全开放这部分功能的配置,敬请期待。
+Paddle Serving's C++ engine supports model management, online A/B testing and model hot loading. These features are not yet fully exposed through the Python API; configuration support will be released in a future version.
-## 4. 用户类型
-Paddle Serving面向的用户提供RPC和HTTP两种访问协议。对于HTTP协议,我们更倾向于流量中小型的服务使用,并且对延时没有严格要求的AI服务开发者。对于RPC协议,我们面向流量较大,对延时要求更高的用户,此外RPC的客户端可能也处在一个大系统的服务中,这种情况下非常适合使用Paddle Serving提供的RPC服务。对于使用分布式稀疏参数索引服务而言,Paddle Serving的用户不需要关心底层的细节,其调用本质也是通过RPC服务再调用RPC服务。下图给出了当前设计的Paddle Serving可能会使用Serving服务的几种场景。
+## 4. User Types
+Paddle Serving provides RPC and HTTP protocols for users. The HTTP protocol is recommended for services with small or medium traffic and no strict latency requirements. The RPC protocol targets services with higher traffic and stricter latency requirements; the RPC client may itself be part of a larger service system, which fits Paddle Serving's RPC service well. Users of the built-in distributed sparse parameter indexing service do not need to care about the underlying communication details; the call is essentially one RPC service calling another. The following figure shows several scenarios in which users may want to use Paddle Serving.
@@ -173,11 +178,11 @@ Paddle Serving面向的用户提供RPC和HTTP两种访问协议。对于HTTP协
-对于普通的模型而言(具体指通过Serving提供的IO保存的模型,并且没有对模型进行后处理),用户使用RPC服务不需要额外的开发即可实现服务启动,但需要开发一些Client端的代码来使用服务。对于Web服务的开发,需要用户现在Paddle Serving提供的Web Service框架中进行前后处理的开发,从而实现整个HTTP服务。
+For common models (models saved through the Paddle Serving IO API without extra post-processing), users can start an RPC service without any additional development, although some client-side code is needed to access it. To obtain a complete HTTP service, a user implements the preprocessing and postprocessing steps within the Web Service framework provided by Paddle Serving.
-### 4.1 Web服务开发
+### 4.1 Web Service Development
-Web服务有很多开源的框架,Paddle Serving当前集成了Flask框架,但这部分对用户不可见,在未来可能会提供性能更好的Web框架作为底层HTTP服务集成引擎。用户需要继承WebService,从而实现对rpc服务的输入输出进行加工的目的。
+There are many open-source web frameworks. Paddle Serving currently integrates Flask as its built-in web framework, but this is invisible to users, and a more performant web framework may be integrated as the underlying HTTP engine in the future. Users inherit from `WebService` to transform the inputs and outputs of the RPC service.
``` python
from paddle_serving_server.web_service import WebService
@@ -208,15 +213,15 @@ imdb_service.prepare_dict({"dict_file_path": sys.argv[4]})
imdb_service.run_server()
```
-`WebService`作为基类,提供将用户接受的HTTP请求转化为RPC输入的接口`preprocess`,同时提供对RPC请求返回的结果进行后处理的接口`postprocess`,继承`WebService`的子类,可以定义各种类型的成员函数。`WebService`的启动命令和普通RPC服务提供的启动API一致。
+`WebService` is a base class: it provides the `preprocess` interface, which converts an incoming HTTP request into the RPC input, and the `postprocess` interface, which post-processes the result returned by the RPC call. A subclass of `WebService` can define any member functions it needs, and the startup interface is the same as that of an ordinary RPC service.
-## 5. 未来计划
+## 5. Future Plan
-### 5.1 有向无环图结构定义开放
-当前版本开放的python API仅支持用户定义Sequential类型的执行流,如果想要进行Server进程内复杂的计算,需要增加对应的用户API。
+### 5.1 Open DAG definition API
+The Python API in the current version only supports defining a sequential execution flow. An open DAG definition API is needed for users to express more complex computation inside the server process.
-### 5.2 云端自动部署能力
-为了方便用户更容易将Paddle的预测模型部署到线上,Paddle Serving在接下来的版本会提供Kubernetes生态下任务编排的工具。
+### 5.2 Auto Deployment on Cloud
+To make it easier to deploy Paddle inference models on public clouds, upcoming versions of Paddle Serving will provide job orchestration tooling for the Kubernetes ecosystem.
-### 5.3 向量检索、树结构检索
-在推荐与广告场景的召回系统中,通常需要采用基于向量的快速检索或者基于树结构的快速检索,Paddle Serving会对这方面的检索引擎进行集成或扩展。
+### 5.3 Vector Indexing and Tree based Indexing
+In the recall stage of recommendation and advertising systems, candidate retrieval commonly relies on fast vector-based or tree-based indexing. Paddle Serving will integrate or extend retrieval engines of this kind as built-in services.
diff --git a/doc/DESIGN_DOC_CN.md b/doc/DESIGN_DOC_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..312379cd7543e70095e5a6d8168aab06b79a0525
--- /dev/null
+++ b/doc/DESIGN_DOC_CN.md
@@ -0,0 +1,222 @@
+# Paddle Serving设计文档
+
+## 1. 整体设计目标
+
+- 长期使命:Paddle Serving是一个PaddlePaddle开源的在线服务框架,长期目标就是围绕着人工智能落地的最后一公里提供越来越专业、可靠、易用的服务。
+
+- 工业级:为了达到工业级深度学习模型在线部署的要求,
+Paddle Serving提供很多大规模场景需要的部署功能:1)分布式稀疏参数索引功能;2)高并发底层通信能力;3)模型管理、在线A/B流量测试、模型热加载。
+
+- 简单易用:为了让使用Paddle的用户能够以极低的成本部署模型,PaddleServing设计了一套与Paddle训练框架无缝打通的预测部署API,普通模型可以使用一行命令进行服务部署。
+
+- 功能扩展:当前,Paddle Serving支持C++、Python、Golang的客户端,未来也会面向不同类型的客户新增多种语言的客户端。在Paddle Serving的框架设计方面,尽管当前Paddle Serving以支持Paddle模型的部署为核心功能,
+用户可以很容易嵌入其他的机器学习库部署在线预测。
+
+## 2. 模块设计与实现
+
+### 2.1 Python API接口设计
+
+#### 2.1.1 训练模型的保存
+Paddle的模型预测需要重点关注的内容:1)模型的输入变量;2)模型的输出变量;3)模型结构和模型参数。Paddle Serving Python API提供用户可以在训练过程中保存模型的接口,并将Paddle Serving在部署阶段需要保存的配置打包保存,一个示例如下:
+``` python
+import paddle_serving_client.io as serving_io
+serving_io.save_model("serving_model", "client_conf",
+ {"words": data}, {"prediction": prediction},
+ fluid.default_main_program())
+```
+代码示例中,`{"words": data}`和`{"prediction": prediction}`分别指定了模型的输入和输出,`"words"`和`"prediction"`是输出和输出变量的别名,设计别名的目的是为了使开发者能够记忆自己训练模型的输入输出对应的字段。`data`和`prediction`则是Paddle训练过程中的`[Variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#variable)`,通常代表张量([Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Tensor_cn.html#tensor))或变长张量([LodTensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor))。调用保存命令后,会按照用户指定的`"serving_model"`和`"client_conf"`生成两个目录,内容如下:
+``` shell
+.
+├── client_conf
+│ ├── serving_client_conf.prototxt
+│ └── serving_client_conf.stream.prototxt
+└── serving_model
+ ├── embedding_0.w_0
+ ├── fc_0.b_0
+ ├── fc_0.w_0
+ ├── fc_1.b_0
+ ├── fc_1.w_0
+ ├── fc_2.b_0
+ ├── fc_2.w_0
+ ├── lstm_0.b_0
+ ├── lstm_0.w_0
+ ├── __model__
+ ├── serving_server_conf.prototxt
+ └── serving_server_conf.stream.prototxt
+```
+其中,`"serving_client_conf.prototxt"`和`"serving_server_conf.prototxt"`是Paddle Serving的Client和Server端需要加载的配置,`"serving_client_conf.stream.prototxt"`和`"serving_server_conf.stream.prototxt"`是配置文件的二进制形式。`"serving_model"`下保存的其他内容和Paddle保存的模型文件是一致的。我们会考虑未来在Paddle框架中直接保存可服务的配置,实现配置保存对用户无感。
+
+#### 2.1.2 服务端模型加载
+
+服务端的预测逻辑可以通过Paddle Serving Server端的API进行人工定义,一个例子:
+``` python
+import paddle_serving_server as serving
+op_maker = serving.OpMaker()
+read_op = op_maker.create('general_reader')
+dist_kv_op = op_maker.create('general_dist_kv')
+general_infer_op = op_maker.create('general_infer')
+general_response_op = op_maker.create('general_response')
+
+op_seq_maker = serving.OpSeqMaker()
+op_seq_maker.add_op(read_op)
+op_seq_maker.add_op(dist_kv_op)
+op_seq_maker.add_op(general_infer_op)
+op_seq_maker.add_op(general_response_op)
+```
+
+当前Paddle Serving在Server端支持的主要Op请参考如下列表:
+
+
+
+| Op 名称 | 描述 |
+|--------------|------|
+| `general_reader` | 通用数据格式的读取Op |
+| `general_infer` | 通用数据格式的Paddle预测Op |
+| `general_response` | 通用数据格式的响应Op |
+| `general_dist_kv` | 分布式索引Op |
+
+
+
+当前Paddle Serving中的预估引擎支持在CPU/GPU上进行预测,对应的预测服务安装包以及镜像也有两个。但无论是CPU上进行模型预估还是GPU上进行模型预估,普通模型的预测都可用一行命令进行启动。
+``` shell
+python -m paddle_serving_server.serve --model your_servable_model --thread 10 --port 9292
+```
+``` shell
+python -m paddle_serving_server_gpu.serve --model your_servable_model --thread 10 --port 9292
+```
+启动命令的选项列表如下:
+
+
+| 参数 | 类型 | 默认值 | 描述 |
+|--------------|------|-----------|--------------------------------|
+| `thread` | int | `4` | 服务端的并发数,通常与CPU核数一致即可 |
+| `port` | int | `9292` | 服务暴露给用户的端口 |
+| `name` | str | `""` | 服务名称,当用户指定时代表直接启动的是HTTP服务 |
+| `model` | str | `""` | 服务端模型文件夹路径 |
+| `gpu_ids` | str | `""` | 仅在paddle_serving_server_gpu中可以使用,功能与CUDA_VISIBLE_DEVICES一致 |
+
+
+
+举例`python -m paddle_serving_server.serve --model your_servable_model --thread 10 --port 9292`对应到具体的Server端具体配置如下
+``` python
+from paddle_serving_server import OpMaker, OpSeqMaker, Server
+
+op_maker = OpMaker()
+read_op = op_maker.create('general_reader')
+general_infer_op = op_maker.create('general_infer')
+general_response_op = op_maker.create('general_response')
+op_seq_maker = OpSeqMaker()
+op_seq_maker.add_op(read_op)
+op_seq_maker.add_op(general_infer_op)
+op_seq_maker.add_op(general_response_op)
+server = Server()
+server.set_op_sequence(op_seq_maker.get_op_sequence())
+server.set_num_threads(10)
+server.load_model_config("your_servable_model")
+server.prepare_server(port=9292, device="cpu")
+server.run_server()
+```
+
+#### 2.1.3 客户端访问API
+Paddle Serving支持远程服务访问的协议一种是基于RPC,另一种是HTTP。用户通过RPC访问,可以使用Paddle Serving提供的Python Client API,通过定制输入数据的格式来实现服务访问。下面的例子解释Paddle Serving Client如何定义输入数据。保存可部署模型时需要指定每个输入的别名,例如`sparse`和`dense`,对应的数据可以是离散的ID序列`[1, 1001, 100001]`,也可以是稠密的向量`[0.2, 0.5, 0.1, 0.4, 0.11, 0.22]`。当前Client的设计,对于离散的ID序列,支持Paddle中的`lod_level=0`和`lod_level=1`的情况,即张量以及一维变长张量。对于稠密的向量,支持`N-D Tensor`。用户不需要显式指定输入数据的形状,Paddle Serving的Client API会通过保存配置时记录的输入形状进行对应的检查。
+``` python
+feed_dict["sparse"] = [1, 1001, 100001]
+feed_dict["dense"] = [0.2, 0.5, 0.1, 0.4, 0.11, 0.22]
+fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
+```
+Client链接Server的代码,通常只需要加载保存模型时保存的Client端配置,以及指定要去访问的服务端点即可。为了保持内部访问进行数据并行的扩展能力,Paddle Serving Client允许定义多个服务端点。
+``` python
+client = Client()
+client.load_client_config('servable_client_configs')
+client.connect(["127.0.0.1:9292"])
+```
+
+
+### 2.2 底层通信机制
+Paddle Serving采用[baidu-rpc](https://github.com/apache/incubator-brpc)进行底层的通信。baidu-rpc是百度开源的一款PRC通信库,具有高并发、低延时等特点,已经支持了包括百度在内上百万在线预估实例、上千个在线预估服务,稳定可靠。
+
+### 2.3 核心执行引擎
+Paddle Serving的核心执行引擎是一个有向无环图,图中的每个节点代表预估服务的一个环节,例如计算模型预测打分就是其中一个环节。有向无环图有利于可并发节点充分利用部署实例内的计算资源,缩短延时。一个例子,当同一份输入需要送入两个不同的模型进行预估,并将两个模型预估的打分进行加权求和时,两个模型的打分过程即可以通过有向无环图的拓扑关系并发。
+
+
+
+
+
+
+### 2.4 微服务插件模式
+由于Paddle Serving底层采用基于C++的通信组件,并且核心框架也是基于C/C++编写,当用户想要在服务端定义复杂的前处理与后处理逻辑时,一种办法是修改Paddle Serving底层框架,重新编译源码。另一种方式可以通过在服务端嵌入轻量级的Web服务,通过在Web服务中实现更复杂的预处理逻辑,从而搭建一套逻辑完整的服务。当访问量超过了Web服务能够接受的范围,开发者有足够的理由开发一些高性能的C++预处理逻辑,并嵌入到Serving的原生服务库中。Web服务和RPC服务的关系以及他们的组合方式可以参考下文`用户类型`中的说明。
+
+## 3. 工业级特性
+
+### 3.1 分布式稀疏参数索引
+
+分布式稀疏参数索引通常在广告推荐中出现,并与分布式训练配合形成完整的离线-在线一体化部署。下图解释了其中的流程,产品的在线服务接受用户请求后将请求发送给预估服务,同时系统会记录用户的请求以进行相应的训练日志处理和拼接。离线分布式训练系统会针对流式产出的训练日志进行模型增量训练,而增量产生的模型会配送至分布式稀疏参数索引服务,同时对应的稠密的模型参数也会配送至在线的预估服务。在线服务由两部分组成,一部分是针对用户的请求提取特征后,将需要进行模型的稀疏参数索引的特征发送请求给分布式稀疏参数索引服务,针对分布式稀疏参数索引服务返回的稀疏参数再进行后续深度学习模型的计算流程,从而完成预估。
+
+
+
+
+
+
+
+为什么要使用Paddle Serving提供的分布式稀疏参数索引服务?1)在一些推荐场景中,模型的输入特征规模通常可以达到上千亿,单台机器无法支撑T级别模型在内存的保存,因此需要进行分布式存储。2)Paddle Serving提供的分布式稀疏参数索引服务,具有并发请求多个节点的能力,从而以较低的延时完成预估服务。
+
+### 3.2 模型管理、在线A/B流量测试、模型热加载
+
+Paddle Serving的C++引擎支持模型管理、在线A/B流量测试、模型热加载等功能,当前在Python API还有没完全开放这部分功能的配置,敬请期待。
+
+## 4. 用户类型
+Paddle Serving面向的用户提供RPC和HTTP两种访问协议。对于HTTP协议,我们更倾向于流量中小型的服务使用,并且对延时没有严格要求的AI服务开发者。对于RPC协议,我们面向流量较大,对延时要求更高的用户,此外RPC的客户端可能也处在一个大系统的服务中,这种情况下非常适合使用Paddle Serving提供的RPC服务。对于使用分布式稀疏参数索引服务而言,Paddle Serving的用户不需要关心底层的细节,其调用本质也是通过RPC服务再调用RPC服务。下图给出了当前设计的Paddle Serving可能会使用Serving服务的几种场景。
+
+
+
+
+
+
+
+对于普通的模型而言(具体指通过Serving提供的IO保存的模型,并且没有对模型进行后处理),用户使用RPC服务不需要额外的开发即可实现服务启动,但需要开发一些Client端的代码来使用服务。对于Web服务的开发,需要用户现在Paddle Serving提供的Web Service框架中进行前后处理的开发,从而实现整个HTTP服务。
+
+### 4.1 Web服务开发
+
+Web服务有很多开源的框架,Paddle Serving当前集成了Flask框架,但这部分对用户不可见,在未来可能会提供性能更好的Web框架作为底层HTTP服务集成引擎。用户需要继承WebService,从而实现对rpc服务的输入输出进行加工的目的。
+
+``` python
+from paddle_serving_server.web_service import WebService
+from imdb_reader import IMDBDataset
+import sys
+
+
+class IMDBService(WebService):
+ def prepare_dict(self, args={}):
+ if len(args) == 0:
+ exit(-1)
+ self.dataset = IMDBDataset()
+ self.dataset.load_resource(args["dict_file_path"])
+
+ def preprocess(self, feed={}, fetch=[]):
+ if "words" not in feed:
+ exit(-1)
+ res_feed = {}
+ res_feed["words"] = self.dataset.get_words_only(feed["words"])[0]
+ return res_feed, fetch
+
+
+imdb_service = IMDBService(name="imdb")
+imdb_service.load_model_config(sys.argv[1])
+imdb_service.prepare_server(
+ workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
+imdb_service.prepare_dict({"dict_file_path": sys.argv[4]})
+imdb_service.run_server()
+```
+
+`WebService`作为基类,提供将用户接受的HTTP请求转化为RPC输入的接口`preprocess`,同时提供对RPC请求返回的结果进行后处理的接口`postprocess`,继承`WebService`的子类,可以定义各种类型的成员函数。`WebService`的启动命令和普通RPC服务提供的启动API一致。
+
+## 5. 未来计划
+
+### 5.1 有向无环图结构定义开放
+当前版本开放的python API仅支持用户定义Sequential类型的执行流,如果想要进行Server进程内复杂的计算,需要增加对应的用户API。
+
+### 5.2 云端自动部署能力
+为了方便用户更容易将Paddle的预测模型部署到线上,Paddle Serving在接下来的版本会提供Kubernetes生态下任务编排的工具。
+
+### 5.3 向量检索、树结构检索
+在推荐与广告场景的召回系统中,通常需要采用基于向量的快速检索或者基于树结构的快速检索,Paddle Serving会对这方面的检索引擎进行集成或扩展。
diff --git a/doc/DESIGN_DOC_EN.md b/doc/DESIGN_DOC_EN.md
deleted file mode 100644
index 2f8a36ea6686b5add2a7e4e407eabfd14167490d..0000000000000000000000000000000000000000
--- a/doc/DESIGN_DOC_EN.md
+++ /dev/null
@@ -1,227 +0,0 @@
-# Paddle Serving Design Doc
-
-## 1. Design Objectives
-
-- Long Term Vision: Online deployment of deep learning models will be a user-facing application in the future. Any AI developer will face the problem of deploying an online service for his or her trained model.
-Paddle Serving is the official open source online deployment framework. The long term goal of Paddle Serving is to provide professional, reliable and easy-to-use online service to the last mile of AI application.
-
-- Easy-To-Use: For algorithmic developers to quickly deploy their models online, Paddle Serving designs APIs that can be used with Paddle's training process seamlessly, most Paddle models can be deployed as a service with one line command.
-
-- Industrial Oriented: To meet industrial deployment requirements, Paddle Serving supports lots of large-scale deployment functions: 1) Distributed Sparse Embedding Indexing. 2) Highly concurrent underlying communications. 3) Model Management, online A/B test, model online loading.
-
-- Extensibility: Paddle Serving supports C++, Python and Golang client, and will support more clients with different languages. It is very easy to extend Paddle Serving to support other machine learning inference library, although currently Paddle inference library is the only official supported inference backend.
-
-
-## 2. Module design and implementation
-
-### 2.1 Python API interface design
-
-#### 2.1.1 save a servable model
-The inference phase of Paddle model focuses on 1) input variables of the model. 2) output variables of the model. 3) model structure and model parameters. Paddle Serving Python API provides a `save_model` interface for trained model, and save necessary information for Paddle Serving to use during deployment phase. An example is as follows:
-
-``` python
-import paddle_serving_client.io as serving_io
-serving_io.save_model("serving_model", "client_conf",
- {"words": data}, {"prediction": prediction},
- fluid.default_main_program())
-```
-In the example, `{"words": data}` and `{"prediction": prediction}` assign the inputs and outputs of a model. `"words"` and `"prediction"` are alias names of inputs and outputs. The design of alias name is to help developers to memorize model inputs and model outputs. `data` and `prediction` are Paddle `[Variable](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Variable_cn.html#variable)` in training phase that often represents ([Tensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/fluid_cn/Tensor_cn.html#tensor)) or ([LodTensor](https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor)). When the `save_model` API is called, two directories called `"serving_model"` and `"client_conf"` will be generated. The content of the saved model is as follows:
-
-``` shell
-.
-├── client_conf
-│ ├── serving_client_conf.prototxt
-│ └── serving_client_conf.stream.prototxt
-└── serving_model
- ├── embedding_0.w_0
- ├── fc_0.b_0
- ├── fc_0.w_0
- ├── fc_1.b_0
- ├── fc_1.w_0
- ├── fc_2.b_0
- ├── fc_2.w_0
- ├── lstm_0.b_0
- ├── lstm_0.w_0
- ├── __model__
- ├── serving_server_conf.prototxt
- └── serving_server_conf.stream.prototxt
-```
-`"serving_client_conf.prototxt"` and `"serving_server_conf.prototxt"` are the client side and the server side configurations of Paddle Serving, and `"serving_client_conf.stream.prototxt"` and `"serving_server_conf.stream.prototxt"` are the corresponding parts. Other contents saved in the directory are the same as Paddle saved inference model. We are considering to support `save_model` interface in Paddle training framework so that a user is not aware of the servable configurations.
-
-#### 2.1.2 Model loading on the server side
-
-Prediction logics on the server side can be defined through Paddle Serving Server API with a few lines of code, an example is as follows:
-``` python
-import paddle_serving_server as serving
-op_maker = serving.OpMaker()
-read_op = op_maker.create('general_reader')
-dist_kv_op = op_maker.create('general_dist_kv')
-general_infer_op = op_maker.create('general_infer')
-general_response_op = op_maker.create('general_response')
-
-op_seq_maker = serving.OpSeqMaker()
-op_seq_maker.add_op(read_op)
-op_seq_maker.add_op(dist_kv_op)
-op_seq_maker.add_op(general_infer_op)
-op_seq_maker.add_op(general_response_op)
-```
-Current Paddle Serving supports operator list on the server side as follows:
-
-
-
-| Op Name | Description |
-|--------------|------|
-| `general_reader` | General Data Reading Operator |
-| `genreal_infer` | General Data Inference with Paddle Operator |
-| `general_response` | General Data Response Operator |
-| `general_dist_kv` | Distributed Sparse Embedding Indexing |
-
-
-
-Paddle Serving supports inference engine on multiple devices. Current supports are CPU and GPU engine. Docker Images of CPU and GPU are provided officially. User can use one line command to start an inference service either on CPU or on GPU.
-
-``` shell
-python -m paddle_serving_server.serve --model your_servable_model --thread 10 --port 9292
-```
-``` shell
-python -m paddle_serving_server_gpu.serve --model your_servable_model --thread 10 --port 9292
-```
-
-Options of startup command are listed below:
-
-
-| Arguments | Types | Defaults | Descriptions |
-|--------------|------|-----------|--------------------------------|
-| `thread` | int | `4` | Concurrency on server side, usually equal to the number of CPU core |
-| `port` | int | `9292` | Port exposed to users |
-| `name` | str | `""` | Service name that if a user specifies, the name of HTTP service is allocated |
-| `model` | str | `""` | Servable models for Paddle Serving |
-| `gpu_ids` | str | `""` | Supported only in paddle_serving_server_gpu, similar to the usage of CUDA_VISIBLE_DEVICES |
-
-
-
-For example, `python -m paddle_serving_server.serve --model your_servable_model --thread 10 --port 9292` is the same as the following code as user can define:
-``` python
-from paddle_serving_server import OpMaker, OpSeqMaker, Server
-
-op_maker = OpMaker()
-read_op = op_maker.create('general_reader')
-general_infer_op = op_maker.create('general_infer')
-general_response_op = op_maker.create('general_response')
-op_seq_maker = OpSeqMaker()
-op_seq_maker.add_op(read_op)
-op_seq_maker.add_op(general_infer_op)
-op_seq_maker.add_op(general_response_op)
-server = Server()
-server.set_op_sequence(op_seq_maker.get_op_sequence())
-server.set_num_threads(10)
-server.load_model_config(”your_servable_model“)
-server.prepare_server(port=9292, device="cpu")
-server.run_server()
-```
-
-#### 2.1.3 Paddle Serving Client API
-Paddle Serving supports remote service access through RPC(remote procedure call) and HTTP. RPC access of remote service can be called through Client API of Paddle Serving. A user can define data preprocess function before calling Paddle Serving's client API. The example below explains how to define the input data of Paddle Serving Client. The servable model has two inputs with alias name of `sparse` and `dense`. `sparse` corresponds to sparse sequence ids such as `[1, 1001, 100001]` and `dense` corresponds to dense vector such as `[0.2, 0.5, 0.1, 0.4, 0.11, 0.22]`. For sparse sequence data, current design supports `lod_level=0` and `lod_level=1` of Paddle, that corresponds to `Tensor` and `LodTensor`. For dense vector, current design supports any `N-D Tensor`. Users do not need to assign the shape of inference model input. The Paddle Serving Client API will check the input data's shape with servable configurations.
-
-``` python
-feed_dict["sparse"] = [1, 1001, 100001]
-feed_dict["dense"] = [0.2, 0.5, 0.1, 0.4, 0.11, 0.22]
-fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
-```
-
-The following code sample shows that Paddle Serving Client API connects to Server API with endpoint of the servers. To use the data parallelism ability during prediction, Paddle Serving Client allows users to define multiple server endpoints.
-``` python
-client = Client()
-client.load_client_config('servable_client_configs')
-client.connect(["127.0.0.1:9292"])
-```
-
-### 2.2 Underlying Communication Mechanism
-Paddle Serving adopts [baidu-rpc](https://github.com/apache/incubator-brpc) as underlying communication layer. baidu-rpc is an open-source RPC communication library with high concurrency and low latency advantages compared with other open source RPC library. Millions of instances and thousands of services are using baidu-rpc within Baidu.
-
-### 2.3 Core Execution Engine
-The core execution engine of Paddle Serving is a Directed acyclic graph(DAG). In the DAG, each node represents a phase of inference service, such as paddle inference prediction, data preprocessing and data postprocessing. DAG can fully parallelize the computation efficiency and can fully utilize the computation resources. For example, when a user has input data that needs to be feed into two models, and combine the scores of the two models, the computation of model scoring is parallelized through DAG.
-
-
-
-
-
-
-
-### 2.4 Micro service plugin
-The underlying communication of Paddle Serving is implemented with C++ as well as the core framework, it is hard for users who do not familiar with C++ to implement new Paddle Serving Server Operators. Another approach is to use the light-weighted Web Service in Paddle Serving Server that can be viewed as a plugin. A user can implement complex data preprocessing and postprocessing logics to build a complex AI service. If access of the AI service has a large volumn, it is worth to implement the service with high performance Paddle Serving Server operators. The relationship between Web Service and RPC Service can be referenced in `User Type`.
-
-## 3. Industrial Features
-
-### 3.1 Distributed Sparse Parameter Indexing
-
-Distributed Sparse Parameter Indexing is commonly seen in advertising and recommendation scenarios, and is often used coupled with distributed training. The figure below explains a commonly seen architecture for online recommendation. When the recommendation service receives a request from a user, the system will automatically collects training log for the offline distributed online training. Mean while, the request is sent to Paddle Serving Server. For sparse features, distributed sparse parameter index service is called so that sparse parameters can be looked up. The dense input features together with the looked up sparse model parameters are fed into the Paddle Inference Node of the DAG in Paddle Serving Server. Then the score can be responsed through RPC to product service for item ranking.
-
-
-
-
-
-
-
-Why do we need to support distributed sparse parameter indexing in Paddle Serving? 1) In some recommendation scenarios, the number of features can be up to hundreds of billions that a single node can not hold the parameters within random access memory. 2) Paddle Serving supports distributed sparse parameter indexing that can couple with paddle inference. Users do not need to do extra work to have a low latency inference engine with hundreds of billions of parameters.
-
-### 3.2 Model Management, online A/B test, Model Online Reloading
-
-Paddle Serving's C++ engine supports model management, online A/B test and model online reloading. Currently, python API is not released yet, please wait for the next release.
-
-## 4. User Types
-Paddle Serving provides RPC and HTTP protocol for users. For HTTP service, we recommend users with median or small traffic services to use, and the latency is not a strict requirement. For RPC protocol, we recommend high traffic services and low latency required services to use. For users who use distributed sparse parameter indexing built-in service, it is not necessary to care about the underlying details of communication. The following figure gives out several scenarios that user may want to use Paddle Serving.
-
-
-
-
-
-
-
-For servable models saved from Paddle Serving IO API, users do not need to do extra coding work to startup a service, but may need some coding work on the client side. For development of Web Service plugin, a user needs to provide implementation of Web Service's preprocessing and postprocessing work if needed to get a HTTP service.
-
-### 4.1 Web Service Development
-
-Web Service has lots of open sourced framework. Currently Paddle Serving uses Flask as built-in service framework, and users are not aware of this. More efficient web service will be integrated in the furture if needed.
-
-``` python
-from paddle_serving_server.web_service import WebService
-from imdb_reader import IMDBDataset
-import sys
-
-
-class IMDBService(WebService):
- def prepare_dict(self, args={}):
- if len(args) == 0:
- exit(-1)
- self.dataset = IMDBDataset()
- self.dataset.load_resource(args["dict_file_path"])
-
- def preprocess(self, feed={}, fetch=[]):
- if "words" not in feed:
- exit(-1)
- res_feed = {}
- res_feed["words"] = self.dataset.get_words_only(feed["words"])[0]
- return res_feed, fetch
-
-
-imdb_service = IMDBService(name="imdb")
-imdb_service.load_model_config(sys.argv[1])
-imdb_service.prepare_server(
- workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
-imdb_service.prepare_dict({"dict_file_path": sys.argv[4]})
-imdb_service.run_server()
-```
-
-`WebService` is a Base Class, providing inheritable interfaces such `preprocess` and `postprocess` for users to implement. In the inherited class of `WebService` class, users can define any functions they want and the startup function interface is the same as RPC service.
-
-## 5. Future Plan
-
-### 5.1 Open DAG definition API
-Current version of Paddle Serving Server supports sequential type of execution flow. DAG definition API can be more helpful to users on complex tasks.
-
-### 5.2 Auto Deployment on Cloud
-In order to make deployment more easily on public cloud, Paddle Serving considers to provides Operators on Kubernetes in submitting a service job.
-
-### 5.3 Vector Indexing and Tree based Indexing
-In recommendation and advertisement systems, it is commonly seen to use vector based index or tree based indexing service to do candidate retrievals. These retrieval tasks will be built-in services of Paddle Serving.
diff --git a/doc/DOCKER.md b/doc/DOCKER.md
index 325ec906c04c708d8e62ff2ae2900bc367e049b6..0e865c66e2da32a4e0ed15df9f2632b98ffbcedf 100644
--- a/doc/DOCKER.md
+++ b/doc/DOCKER.md
@@ -1,53 +1,55 @@
-# Docker编译环境准备
+# Docker compilation environment preparation
-## 环境要求
+([简体中文](./DOCKER_CN.md)|English)
-+ 开发机上已安装Docker。
-+ 编译GPU版本需要安装nvidia-docker。
+## Environmental requirements
-## Dockerfile文件
++ Docker is installed on the development machine.
++ Compiling the GPU version requires nvidia-docker.
-[CPU版本Dockerfile](../Dockerfile)
+## Dockerfile
-[GPU版本Dockerfile](../Dockerfile.gpu)
+[CPU Version Dockerfile](../tools/Dockerfile)
-## 使用方法
+[GPU Version Dockerfile](../tools/Dockerfile.gpu)
-### 构建Docker镜像
+## Instructions
-建立新目录,复制Dockerfile内容到该目录下Dockerfile文件。
+### Building Docker Image
-执行
+Create a new directory and copy the Dockerfile to this directory.
+
+Run
```bash
docker build -t serving_compile:cpu .
```
-或者
+Or
```bash
docker build -t serving_compile:cuda9 .
```
-## 进入Docker
+## Enter Docker Container
-CPU版本请执行
+For the CPU version, run
```bash
docker run -it serving_compile:cpu bash
```
-GPU版本请执行
+For the GPU version, run
```bash
docker run -it --runtime=nvidia -it serving_compile:cuda9 bash
```
-## Docker编译出的可执行文件支持的环境列表
+## Environments supported by Docker-compiled executables
-经过验证的环境列表如下:
+The list of verified environments is as follows:
-| CPU Docker编译出的可执行文件支持的系统环境 |
+| System Environment Supported by CPU Docker Compiled Executables |
| -------------------------- |
| Centos6 |
| Centos7 |
@@ -56,7 +58,7 @@ docker run -it --runtime=nvidia -it serving_compile:cuda9 bash
-| GPU Docker编译出的可执行文件支持的系统环境 |
+| System Environment Supported by GPU Docker Compiled Executables |
| ---------------------------------- |
| Centos6_cuda9_cudnn7 |
| Centos7_cuda9_cudnn7 |
@@ -65,6 +67,6 @@ docker run -it --runtime=nvidia -it serving_compile:cuda9 bash
-**备注:**
-+ 若执行预编译版本出现找不到libcrypto.so.10、libssl.so.10的情况,可以将Docker环境中的/usr/lib64/libssl.so.10与/usr/lib64/libcrypto.so.10复制到可执行文件所在目录。
-+ CPU预编译版本仅可在CPU机器上执行,GPU预编译版本仅可在GPU机器上执行。
+**Remarks:**
++ If libcrypto.so.10 and libssl.so.10 cannot be found when running the pre-compiled binaries, copy /usr/lib64/libssl.so.10 and /usr/lib64/libcrypto.so.10 from the Docker environment into the directory where the executable is located.
++ The CPU pre-compiled version can only run on CPU machines, and the GPU pre-compiled version can only run on GPU machines.
diff --git a/doc/DOCKER_CN.md b/doc/DOCKER_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..92cc3ac6ea34d6399d6204ff7b9ec2d12b412601
--- /dev/null
+++ b/doc/DOCKER_CN.md
@@ -0,0 +1,72 @@
+# Docker编译环境准备
+
+(简体中文|[English](./DOCKER.md))
+
+## 环境要求
+
++ 开发机上已安装Docker。
++ 编译GPU版本需要安装nvidia-docker。
+
+## Dockerfile文件
+
+[CPU版本Dockerfile](../tools/Dockerfile)
+
+[GPU版本Dockerfile](../tools/Dockerfile.gpu)
+
+## 使用方法
+
+### 构建Docker镜像
+
+建立新目录,复制Dockerfile内容到该目录下Dockerfile文件。
+
+执行
+
+```bash
+docker build -t serving_compile:cpu .
+```
+
+或者
+
+```bash
+docker build -t serving_compile:cuda9 .
+```
+
+## 进入Docker
+
+CPU版本请执行
+
+```bash
+docker run -it serving_compile:cpu bash
+```
+
+GPU版本请执行
+
+```bash
+docker run -it --runtime=nvidia -it serving_compile:cuda9 bash
+```
+
+## Docker编译出的可执行文件支持的环境列表
+
+经过验证的环境列表如下:
+
+| CPU Docker编译出的可执行文件支持的系统环境 |
+| -------------------------- |
+| Centos6 |
+| Centos7 |
+| Ubuntu16.04 |
+| Ubuntu18.04 |
+
+
+
+| GPU Docker编译出的可执行文件支持的系统环境 |
+| ---------------------------------- |
+| Centos6_cuda9_cudnn7 |
+| Centos7_cuda9_cudnn7 |
+| Ubuntu16.04_cuda9_cudnn7 |
+| Ubuntu16.04_cuda10_cudnn7 |
+
+
+
+**备注:**
++ 若执行预编译版本出现找不到libcrypto.so.10、libssl.so.10的情况,可以将Docker环境中的/usr/lib64/libssl.so.10与/usr/lib64/libcrypto.so.10复制到可执行文件所在目录。
++ CPU预编译版本仅可在CPU机器上执行,GPU预编译版本仅可在GPU机器上执行。
diff --git a/doc/IMDB_GO_CLIENT.md b/doc/IMDB_GO_CLIENT.md
index 5b10192597f393d65f1387bfb39615e1777ec2d6..5befc0226235dd599b980d98594dba78e54bf530 100644
--- a/doc/IMDB_GO_CLIENT.md
+++ b/doc/IMDB_GO_CLIENT.md
@@ -1,5 +1,7 @@
# How to use Go Client of Paddle Serving
+([简体中文](./IMDB_GO_CLIENT_CN.md)|English)
+
This document shows how to use Go as your client language. For Go client in Paddle Serving, a simple client package is provided https://github.com/PaddlePaddle/Serving/tree/develop/go/serving_client, a user can import this package as needed. Here is a simple example of sentiment analysis task based on IMDB dataset.
### Install
@@ -15,7 +17,7 @@ pip install paddle-serving-server
### Download Text Classification Model
``` shell
-wget https://paddle-serving.bj.bcebos.com/data%2Ftext_classification%2Fimdb_serving_example.tar.gz
+wget https://paddle-serving.bj.bcebos.com/data/text_classification/imdb_serving_example.tar.gz
tar -xzf imdb_serving_example.tar.gz
```
diff --git a/doc/IMDB_GO_CLIENT_CN.md b/doc/IMDB_GO_CLIENT_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..d14abe647038846aeeeebf9484f1c02e151b4275
--- /dev/null
+++ b/doc/IMDB_GO_CLIENT_CN.md
@@ -0,0 +1,194 @@
+# 如何在Paddle Serving使用Go Client
+
+(简体中文|[English](./IMDB_GO_CLIENT.md))
+
+本文档说明了如何将Go用作客户端语言。对于Paddle Serving中的Go客户端,提供了一个简单的客户端程序包https://github.com/PaddlePaddle/Serving/tree/develop/go/serving_client, 用户可以根据需要引用该程序包。这是一个基于IMDB数据集的情感分析任务的简单示例。
+
+### 安装
+
+我们假设您已经安装了Go 1.9.2或更高版本,并且安装了python 2.7版本
+
+```shell
+go get github.com/PaddlePaddle/Serving/go/serving_client
+go get github.com/PaddlePaddle/Serving/go/proto
+pip install paddle-serving-server
+```
+### 下载文本分类模型
+
+```shell
+wget https://paddle-serving.bj.bcebos.com/data/text_classification/imdb_serving_example.tar.gz
+tar -xzf imdb_serving_example.tar.gz
+```
+
+### 服务器端代码
+
+```python
+# test_server_go.py
+import os
+import sys
+from paddle_serving_server import OpMaker
+from paddle_serving_server import OpSeqMaker
+from paddle_serving_server import Server
+
+op_maker = OpMaker()
+read_op = op_maker.create('general_text_reader')
+general_infer_op = op_maker.create('general_infer')
+general_response_op = op_maker.create('general_text_response')
+
+op_seq_maker = OpSeqMaker()
+op_seq_maker.add_op(read_op)
+op_seq_maker.add_op(general_infer_op)
+op_seq_maker.add_op(general_response_op)
+
+server = Server()
+server.set_op_sequence(op_seq_maker.get_op_sequence())
+server.load_model_config(sys.argv[1])
+server.prepare_server(workdir="work_dir1", port=9292, device="cpu")
+server.run_server()
+```
+
+### 启动服务器
+
+```shell
+python test_server_go.py ./serving_server_model/ 9292
+```
+
+### 客户端代码示例
+
+```go
+// imdb_client.go
+package main
+
+import (
+    "bufio"
+    "fmt"
+    "io"
+    "os"
+    "strconv"
+    "strings"
+
+    serving_client "github.com/PaddlePaddle/Serving/go/serving_client"
+)
+
+func main() {
+    // Load the client configuration saved at model export time and connect.
+    config_file_path := os.Args[1]
+    handle := serving_client.LoadModelConfig(config_file_path)
+    handle = serving_client.Connect("127.0.0.1", "9292", handle)
+
+    test_file_path := os.Args[2]
+    fi, err := os.Open(test_file_path)
+    if err != nil {
+        fmt.Print(err)
+    }
+
+    defer fi.Close()
+    br := bufio.NewReader(fi)
+
+    fetch := []string{"cost", "acc", "prediction"}
+
+    var result map[string][]float32
+
+    for {
+        line, err := br.ReadString('\n')
+        if err == io.EOF {
+            break
+        }
+
+        line = strings.Trim(line, "\n")
+
+        var words = []int64{}
+
+        // Each line is: <num_words> <word ids ...> <label>
+        s := strings.Split(line, " ")
+        value, err := strconv.Atoi(s[0])
+        var feed_int_map map[string][]int64
+
+        for _, v := range s[1 : value+1] {
+            int_v, _ := strconv.Atoi(v)
+            words = append(words, int64(int_v))
+        }
+
+        label, err := strconv.Atoi(s[len(s)-1])
+        if err != nil {
+            panic(err)
+        }
+
+        feed_int_map = map[string][]int64{}
+        feed_int_map["words"] = words
+        feed_int_map["label"] = []int64{int64(label)}
+
+        result = serving_client.Predict(handle, feed_int_map, fetch)
+        fmt.Println(result["prediction"][1], "\t", int64(label))
+    }
+}
+```
+
+### 基于IMDB测试集的预测
+
+```shell
+go run imdb_client.go serving_client_conf/serving_client_conf.stream.prototxt test.data > result
+```
+
+### 计算精度
+
+```go
+// acc.go
+package main
+
+import (
+    "bufio"
+    "fmt"
+    "io"
+    "os"
+    "strconv"
+    "strings"
+)
+
+func main() {
+    score_file := os.Args[1]
+    fi, err := os.Open(score_file)
+    if err != nil {
+        fmt.Print(err)
+    }
+
+    defer fi.Close()
+    br := bufio.NewReader(fi)
+
+    total := int(0)
+    acc := int(0)
+    for {
+        line, err := br.ReadString('\n')
+        if err == io.EOF {
+            break
+        }
+
+        // Each result line is: <positive probability>\t<label>
+        line = strings.Trim(line, "\n")
+        s := strings.Split(line, "\t")
+        prob_str := strings.Trim(s[0], " ")
+        label_str := strings.Trim(s[1], " ")
+        prob, err := strconv.ParseFloat(prob_str, 32)
+        if err != nil {
+            panic(err)
+        }
+        label, err := strconv.ParseFloat(label_str, 32)
+        if err != nil {
+            panic(err)
+        }
+        if (prob-0.5)*(label-0.5) > 0 {
+            acc++
+        }
+        total++
+    }
+    fmt.Println("total num:", total)
+    fmt.Println("acc num:", acc)
+    fmt.Println("acc:", float32(acc)/float32(total))
+}
+```
+
+```shell
+go run acc.go result
+total num: 25000
+acc num: 22014
+acc: 0.88056
+```
diff --git a/doc/NEW_OPERATOR.md b/doc/NEW_OPERATOR.md
index f839be94aaa2ae9993d935c0af69bcde33b9d66f..ab1ff42adea44eec26e84bd4356bc4313d420ce2 100644
--- a/doc/NEW_OPERATOR.md
+++ b/doc/NEW_OPERATOR.md
@@ -1,5 +1,7 @@
# How to write an general operator?
+([简体中文](./NEW_OPERATOR_CN.md)|English)
+
In this document, we mainly focus on how to develop a new server side operator for PaddleServing. Before we start to write a new operator, let's look at some sample code to get the basic idea of writing a new operator for server. We assume you have known the basic computation logic on server side of PaddleServing, please reference to []() if you do not know much about it. The following code can be visited at `core/general-server/op` of Serving repo.
``` c++
diff --git a/doc/NEW_OPERATOR_CN.md b/doc/NEW_OPERATOR_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..d659b5f328cfbfc48ec7f3016037b12f34139b73
--- /dev/null
+++ b/doc/NEW_OPERATOR_CN.md
@@ -0,0 +1,149 @@
+# 如何开发一个新的General Op?
+
+(简体中文|[English](./NEW_OPERATOR.md))
+
+在本文档中,我们主要集中于如何为Paddle Serving开发新的服务器端运算符。 在开始编写新运算符之前,让我们看一些示例代码以获得为服务器编写新运算符的基本思想。 我们假设您已经知道Paddle Serving服务器端的基本计算逻辑。 下面的代码您可以在 Serving代码库下的 `core/general-server/op` 目录查阅。
+
+
+``` c++
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include <string>
+#include <vector>
+#ifdef BCLOUD
+#ifdef WITH_GPU
+#include "paddle/paddle_inference_api.h"
+#else
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
+#endif
+#else
+#include "paddle_inference_api.h" // NOLINT
+#endif
+#include "core/general-server/general_model_service.pb.h"
+#include "core/general-server/op/general_infer_helper.h"
+
+namespace baidu {
+namespace paddle_serving {
+namespace serving {
+
+class GeneralInferOp
+    : public baidu::paddle_serving::predictor::OpWithChannel<GeneralBlob> {
+ public:
+  typedef std::vector<paddle::PaddleTensor> TensorVector;
+
+ DECLARE_OP(GeneralInferOp);
+
+ int inference();
+
+};
+
+} // namespace serving
+} // namespace paddle_serving
+} // namespace baidu
+```
+
+## 定义一个Op
+
+上面的头文件声明了一个名为`GeneralInferOp`的PaddleServing运算符。 在运行时,将调用函数 `int inference()`。 通常,我们将服务器端运算符定义为baidu::paddle_serving::predictor::OpWithChannel的子类,并使用 `GeneralBlob` 数据结构。
+
+## 在Op之间使用 `GeneralBlob`
+
+`GeneralBlob` 是一种可以在服务器端运算符之间使用的数据结构。 `tensor_vector`是`GeneralBlob`中最重要的数据结构。 服务器端的操作员可以将多个`paddle::PaddleTensor`作为输入,并可以将多个`paddle::PaddleTensor`作为输出。 特别是,`tensor_vector`可以在没有内存拷贝的操作下输入到Paddle推理引擎中。
+
+``` c++
+struct GeneralBlob {
+  std::vector<paddle::PaddleTensor> tensor_vector;
+  int64_t time_stamp[20];
+  int p_size = 0;
+
+  int _batch_size;
+
+  void Clear() {
+    size_t tensor_count = tensor_vector.size();
+    for (size_t ti = 0; ti < tensor_count; ++ti) {
+      tensor_vector[ti].shape.clear();
+    }
+    tensor_vector.clear();
+  }
+
+  int SetBatchSize(int batch_size) { _batch_size = batch_size; }
+
+  int GetBatchSize() const { return _batch_size; }
+  std::string ShortDebugString() const { return "Not implemented!"; }
+};
+```
+
+### 实现 `int inference()`
+
+``` c++
+int GeneralInferOp::inference() {
+  VLOG(2) << "Going to run inference";
+  const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name());
+  VLOG(2) << "Get precedent op name: " << pre_name();
+  GeneralBlob *output_blob = mutable_data<GeneralBlob>();
+
+  if (!input_blob) {
+    LOG(ERROR) << "Failed mutable depended argument, op:" << pre_name();
+    return -1;
+  }
+
+  const TensorVector *in = &input_blob->tensor_vector;
+  TensorVector *out = &output_blob->tensor_vector;
+  int batch_size = input_blob->GetBatchSize();
+  VLOG(2) << "input batch size: " << batch_size;
+
+  output_blob->SetBatchSize(batch_size);
+
+  VLOG(2) << "infer batch size: " << batch_size;
+
+  Timer timeline;
+  int64_t start = timeline.TimeStampUS();
+  timeline.Start();
+
+  if (InferManager::instance().infer(GENERAL_MODEL_NAME, in, out, batch_size)) {
+    LOG(ERROR) << "Failed do infer in fluid model: " << GENERAL_MODEL_NAME;
+    return -1;
+  }
+
+  int64_t end = timeline.TimeStampUS();
+  CopyBlobInfo(input_blob, output_blob);
+  AddBlobInfo(output_blob, start);
+  AddBlobInfo(output_blob, end);
+  return 0;
+}
+DEFINE_OP(GeneralInferOp);
+```
+
+`input_blob` 和 `output_blob` 都有很多的 `paddle::PaddleTensor`, 且Paddle预测库会被 `InferManager::instance().infer(GENERAL_MODEL_NAME, in, out, batch_size)`调用。此函数中的其他大多数代码都与性能分析有关,将来我们也可能会删除多余的代码。
+
+
+基本上,以上代码可以实现一个新的运算符。如果您想访问字典资源,可以参考`core/predictor/framework/resource.cpp`来添加全局可见资源。资源的初始化在启动服务器的运行时执行。
+
+## 定义 Python API
+
+在服务器端为Paddle Serving定义C++运算符后,最后一步是在Python API中为Paddle Serving服务器API添加注册, `python/paddle_serving_server/__init__.py`文件里有关于API注册的代码如下
+
+``` python
+self.op_dict = {
+ "general_infer": "GeneralInferOp",
+ "general_reader": "GeneralReaderOp",
+ "general_response": "GeneralResponseOp",
+ "general_text_reader": "GeneralTextReaderOp",
+ "general_text_response": "GeneralTextResponseOp",
+ "general_single_kv": "GeneralSingleKVOp",
+ "general_dist_kv": "GeneralDistKVOp"
+ }
+```
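+
+注册完成后,新的运算符就可以像内置运算符一样通过 `OpMaker` 创建并加入到服务的op序列中。下面给出一个假设性的示意(`general_my_infer` 与 `GeneralMyInferOp` 仅为示例名称,需要先按上文方式实现并加入 `op_dict`):
+
+``` python
+import paddle_serving_server as serving
+
+# 假设 op_dict 中已新增 "general_my_infer": "GeneralMyInferOp"(示例名称)
+op_maker = serving.OpMaker()
+read_op = op_maker.create('general_reader')
+my_infer_op = op_maker.create('general_my_infer')  # 新注册的运算符
+response_op = op_maker.create('general_response')
+
+op_seq_maker = serving.OpSeqMaker()
+op_seq_maker.add_op(read_op)
+op_seq_maker.add_op(my_infer_op)
+op_seq_maker.add_op(response_op)
+```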
diff --git a/doc/README.md b/doc/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5d529175054fa97c495b2a7581fdcb2fe0e4c394
--- /dev/null
+++ b/doc/README.md
@@ -0,0 +1,119 @@
+# Paddle Serving
+
+([简体中文](./README_CN.md)|English)
+
+Paddle Serving is PaddlePaddle's online estimation service framework, which can help developers easily implement remote prediction services that call deep learning models from mobile and server ends. At present, Paddle Serving is mainly based on models that support PaddlePaddle training. It can be used in conjunction with the Paddle training framework to quickly deploy inference services. Paddle Serving is designed around common industrial-level deep learning model deployment scenarios. Some common functions include multi-model management, model hot loading, [Baidu-rpc](https://github.com/apache/incubator-brpc)-based high-concurrency low-latency response capabilities, and online model A/B tests. The API that cooperates with the Paddle training framework can enable users to seamlessly transition between training and remote deployment, improving the landing efficiency of deep learning models.
+
+------------
+
+## Quick Start
+
+Paddle Serving's current develop version supports a lightweight Python API for fast predictions and integrates smoothly with models trained by Paddle. We take the classic Boston house price prediction task as an example to fully explain the process of model training on a single machine and model deployment using Paddle Serving.
+
+#### Install
+
+It is highly recommended that you build Paddle Serving inside Docker; please read [How to run PaddleServing in Docker](RUN_IN_DOCKER.md).
+
+```
+pip install paddle-serving-client
+pip install paddle-serving-server
+```
+
+#### Training Script
+``` python
+import sys
+import paddle
+import paddle.fluid as fluid
+
+train_reader = paddle.batch(paddle.reader.shuffle(
+ paddle.dataset.uci_housing.train(), buf_size=500), batch_size=16)
+
+test_reader = paddle.batch(paddle.reader.shuffle(
+ paddle.dataset.uci_housing.test(), buf_size=500), batch_size=16)
+
+x = fluid.data(name='x', shape=[None, 13], dtype='float32')
+y = fluid.data(name='y', shape=[None, 1], dtype='float32')
+
+y_predict = fluid.layers.fc(input=x, size=1, act=None)
+cost = fluid.layers.square_error_cost(input=y_predict, label=y)
+avg_loss = fluid.layers.mean(cost)
+sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01)
+sgd_optimizer.minimize(avg_loss)
+
+place = fluid.CPUPlace()
+feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
+exe = fluid.Executor(place)
+exe.run(fluid.default_startup_program())
+
+import paddle_serving_client.io as serving_io
+
+for pass_id in range(30):
+    for data_train in train_reader():
+        avg_loss_value, = exe.run(
+            fluid.default_main_program(),
+            feed=feeder.feed(data_train),
+            fetch_list=[avg_loss])
+
+serving_io.save_model(
+ "serving_server_model", "serving_client_conf",
+ {"x": x}, {"y": y_predict}, fluid.default_main_program())
+```
+
+#### Server Side Code
+``` python
+import sys
+from paddle_serving_server import OpMaker
+from paddle_serving_server import OpSeqMaker
+from paddle_serving_server import Server
+
+op_maker = OpMaker()
+read_op = op_maker.create('general_reader')
+general_infer_op = op_maker.create('general_infer')
+
+op_seq_maker = OpSeqMaker()
+op_seq_maker.add_op(read_op)
+op_seq_maker.add_op(general_infer_op)
+
+server = Server()
+server.set_op_sequence(op_seq_maker.get_op_sequence())
+server.load_model_config(sys.argv[1])
+server.prepare_server(workdir="work_dir1", port=9292, device="cpu")
+server.run_server()
+```
+
+#### Launch the Server Side
+``` shell
+python test_server.py serving_server_model
+```
+
+#### Client Prediction
+``` python
+from paddle_serving_client import Client
+import paddle
+import sys
+
+client = Client()
+client.load_client_config(sys.argv[1])
+client.connect(["127.0.0.1:9292"])
+
+test_reader = paddle.batch(paddle.reader.shuffle(
+ paddle.dataset.uci_housing.test(), buf_size=500), batch_size=1)
+
+for data in test_reader():
+    fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["y"])
+    print("{} {}".format(fetch_map["y"][0], data[0][1][0]))
+
+```
+
+### Document
+
+[Design Doc](DESIGN.md)
+
+[FAQ](FAQ.md)
+
+### Senior Developer Guidelines
+
+[Compile Tutorial](COMPILE.md)
+
+## Contribution
+If you want to make contributions to Paddle Serving, please refer to [CONTRIBUTE](CONTRIBUTE.md)
diff --git a/doc/README_CN.md b/doc/README_CN.md
index f8d42e6f1e72f1ac34939e5795df3e6604924bad..82a82622faffe7b3d8ccffea6e2108caa9e5b57c 100644
--- a/doc/README_CN.md
+++ b/doc/README_CN.md
@@ -1,5 +1,7 @@
# Paddle Serving
+(简体中文|[English](./README.md))
+
Paddle Serving是PaddlePaddle的在线预估服务框架,能够帮助开发者轻松实现从移动端、服务器端调用深度学习模型的远程预测服务。当前Paddle Serving以支持PaddlePaddle训练的模型为主,可以与Paddle训练框架联合使用,快速部署预估服务。Paddle Serving围绕常见的工业级深度学习模型部署场景进行设计,一些常见的功能包括多模型管理、模型热加载、基于[Baidu-rpc](https://github.com/apache/incubator-brpc)的高并发低延迟响应能力、在线模型A/B实验等。与Paddle训练框架互相配合的API可以使用户在训练与远程部署之间无缝过度,提升深度学习模型的落地效率。
------------
@@ -10,7 +12,7 @@ Paddle Serving当前的develop版本支持轻量级Python API进行快速预测
#### 安装
-强烈建议您在Docker内构建Paddle Serving,请查看[如何在Docker中运行PaddleServing](doc/RUN_IN_DOCKER_CN.md)
+强烈建议您在Docker内构建Paddle Serving,请查看[如何在Docker中运行PaddleServing](RUN_IN_DOCKER_CN.md)
```
pip install paddle-serving-client
@@ -105,13 +107,13 @@ for data in test_reader():
### 文档
-[设计文档](doc/DESIGN.md)
+[设计文档](DESIGN_CN.md)
-[FAQ](doc/FAQ.md)
+[FAQ](FAQ.md)
### 资深开发者使用指南
-[编译指南](doc/INSTALL.md)
+[编译指南](COMPILE_CN.md)
## 贡献
-如果你想要给Paddle Serving做贡献,请参考[贡献指南](doc/CONTRIBUTE.md)
+如果你想要给Paddle Serving做贡献,请参考[贡献指南](CONTRIBUTE.md)
diff --git a/doc/SAVE.md b/doc/SAVE.md
index 59464a4e7c1931291d4a21b8d9d802a07dd22ec6..c1e6b19a45c75a64207802984f52c734d44f8fc8 100644
--- a/doc/SAVE.md
+++ b/doc/SAVE.md
@@ -1,4 +1,7 @@
## How to save a servable model of Paddle Serving?
+
+([简体中文](./SAVE_CN.md)|English)
+
- Currently, paddle serving provides a save_model interface for users to access, the interface is similar with `save_inference_model` of Paddle.
``` python
import paddle_serving_client.io as serving_io
diff --git a/doc/SAVE_CN.md b/doc/SAVE_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..0e2ecd5b71b860e887027564940e9e64522e097f
--- /dev/null
+++ b/doc/SAVE_CN.md
@@ -0,0 +1,31 @@
+## 怎样保存用于Paddle Serving的模型?
+
+(简体中文|[English](./SAVE.md))
+
+- 目前,Paddle服务提供了一个save_model接口供用户访问,该接口与Paddle的`save_inference_model`类似。
+
+``` python
+import paddle_serving_client.io as serving_io
+serving_io.save_model("imdb_model", "imdb_client_conf",
+ {"words": data}, {"prediction": prediction},
+ fluid.default_main_program())
+```
+imdb_model是带有服务端配置的服务器端模型,imdb_client_conf是客户端rpc配置。Serving提供了一个让用户存放Feed和Fetch变量信息的字典。在示例中,`{"words": data}` 是用于指定已保存推理模型输入的feed字典,`{"prediction": prediction}` 是用于指定已保存推理模型输出的fetch字典。还可以为feed和fetch变量定义别名,使用别名的示例如下:
+
+``` python
+from paddle_serving_client import Client
+import sys
+
+client = Client()
+client.load_client_config(sys.argv[1])
+client.connect(["127.0.0.1:9393"])
+
+for line in sys.stdin:
+    group = line.strip().split()
+    words = [int(x) for x in group[1:int(group[0]) + 1]]
+    label = [int(group[-1])]
+    feed = {"words": words, "label": label}
+    fetch = ["acc", "cost", "prediction"]
+    fetch_map = client.predict(feed=feed, fetch=fetch)
+    print("{} {}".format(fetch_map["prediction"][1], label[0]))
+```
diff --git a/doc/SERVER_DAG.md b/doc/SERVER_DAG.md
index fd15140f183dac1d414c6fffe8df250500db24b3..fdfcec948e3224ba53c4ab09d0551b3df205e8aa 100644
--- a/doc/SERVER_DAG.md
+++ b/doc/SERVER_DAG.md
@@ -1,5 +1,7 @@
# Computation Graph On Server
+([简体中文](./SERVER_DAG_CN.md)|English)
+
This document shows the concept of computation graph on server. How to define computation graph with PaddleServing built-in operators. Examples for some sequential execution logics are shown as well.
## Computation Graph on Server
diff --git a/doc/SERVER_DAG_CN.md b/doc/SERVER_DAG_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..3bf42ef8e3fbcb8c509a69bfe6aea12f78dc4567
--- /dev/null
+++ b/doc/SERVER_DAG_CN.md
@@ -0,0 +1,58 @@
+# Server端的计算图
+
+(简体中文|[English](./SERVER_DAG.md))
+
+本文档显示了Server端上计算图的概念。 如何使用PaddleServing内置运算符定义计算图。 还显示了一些顺序执行逻辑的示例。
+
+## Server端的计算图
+
+深度神经网络通常在输入数据上有一些预处理步骤,而在模型推断分数上有一些后处理步骤。 由于深度学习框架现在非常灵活,因此可以在训练计算图之外进行预处理和后处理。 如果要在服务器端进行输入数据预处理和推理结果后处理,则必须在服务器上添加相应的计算逻辑。 此外,如果用户想在多个模型上使用相同的输入进行推理,则最好的方法是在仅提供一个客户端请求的情况下在服务器端同时进行推理,这样我们可以节省一些网络计算开销。 由于以上两个原因,自然而然地将有向无环图(DAG)视为服务器推理的主要计算方法。 DAG的一个示例如下:
+
+
+
+
+
+## 如何定义节点
+
+PaddleServing在框架中具有一些预定义的计算节点。 一种非常常用的计算图是简单的reader-infer-response模式,可以涵盖大多数单一模型推理方案。 示例图和相应的DAG定义代码如下。
+
+
+
+
+``` python
+import paddle_serving_server as serving
+op_maker = serving.OpMaker()
+read_op = op_maker.create('general_reader')
+general_infer_op = op_maker.create('general_infer')
+general_response_op = op_maker.create('general_response')
+
+op_seq_maker = serving.OpSeqMaker()
+op_seq_maker.add_op(read_op)
+op_seq_maker.add_op(general_infer_op)
+op_seq_maker.add_op(general_response_op)
+```
+
+由于该代码在大多数情况下都会被使用,并且用户不必更改代码,因此PaddleServing会发布一个易于使用的启动命令来启动服务。 示例如下:
+
+``` shell
+python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9292
+```
+
+## 更多示例
+
+如果用户将稀疏特征作为输入,并且模型将对每个特征进行嵌入查找,则我们可以进行分布式嵌入查找操作,该操作不在Paddle训练计算图中。 示例如下:
+
+``` python
+import paddle_serving_server as serving
+op_maker = serving.OpMaker()
+read_op = op_maker.create('general_reader')
+dist_kv_op = op_maker.create('general_dist_kv')
+general_infer_op = op_maker.create('general_infer')
+general_response_op = op_maker.create('general_response')
+
+op_seq_maker = serving.OpSeqMaker()
+op_seq_maker.add_op(read_op)
+op_seq_maker.add_op(dist_kv_op)
+op_seq_maker.add_op(general_infer_op)
+op_seq_maker.add_op(general_response_op)
+```
diff --git a/doc/TRAIN_TO_SERVICE.md b/doc/TRAIN_TO_SERVICE.md
index 11e64eebed84be9889f6e833511bdade897aeb23..a5773accae5d135cdfad4c978656a667f442ff8e 100644
--- a/doc/TRAIN_TO_SERVICE.md
+++ b/doc/TRAIN_TO_SERVICE.md
@@ -1,36 +1,36 @@
-# 端到端完成从训练到部署全流程
+# End-to-end process from training to deployment
-Paddle Serving是Paddle的高性能在线预测服务框架,可以灵活支持大多数模型的部署。本文中将以IMDB评论情感分析任务为例通过9步展示从模型的训练到部署预测服务的全流程。
+([简体中文](./TRAIN_TO_SERVICE_CN.md)|English)
-## Step1:准备环境
+Paddle Serving is Paddle's high-performance online prediction service framework, which can flexibly support the deployment of most models. In this article, the IMDB review sentiment analysis task is used as an example to show the entire process from model training to deployment of prediction service through 9 steps.
-Paddle Serving可以部署在Centos和Ubuntu等Linux环境上,在其他系统上或者不希望安装serving模块的环境中仍然可以通过http服务来访问server端的预测服务。
+## Step1: Prepare the Running Environment
+Paddle Serving can be deployed on Linux environments such as Centos and Ubuntu. On other systems or in environments where you do not want to install the serving module, you can still access the server-side prediction service through the http service.
-可以根据需求和机器环境来选择安装cpu或gpu版本的server模块,在client端机器上安装client模块。当希望同http来访问server端
+You can choose to install the cpu or gpu version of the server module according to your needs and machine environment, and install the client module on the client machine. If you only want to access the server over HTTP, the client module is not required.
```shell
-pip install paddle_serving_server #cpu版本server端
-pip install paddle_serving_server_gpu #gpu版本server端
-pip install paddle_serving_client #client端
+pip install paddle_serving_server #cpu version server side
+pip install paddle_serving_server_gpu #gpu version server side
+pip install paddle_serving_client #client side
```
-简单准备后,我们将以IMDB评论情感分析任务为例,展示从模型训练到部署预测服务的流程。示例中的所有代码都可以在Paddle Serving代码库的[IMDB示例](https://github.com/PaddlePaddle/Serving/tree/develop/python/examples/imdb)中找到,示例中使用的数据和词典文件可以通过执行IMDB示例代码中的get_data.sh脚本得到。
+After this simple preparation, we will take the IMDB review sentiment analysis task as an example to show the process from model training to deployment of the prediction service. All the code in the example can be found in the [IMDB example](https://github.com/PaddlePaddle/Serving/tree/develop/python/examples/imdb) of the Paddle Serving code base, and the data and dictionary files used in the example can be obtained by executing the get_data.sh script in the IMDB example code.
-## Step2:确定任务和原始数据格式
+## Step2: Determine the Task and Raw Data Format
-IMDB评论情感分析任务是对电影评论的内容进行二分类,判断该评论是属于正面评论还是负面评论。
-
-首先我们来看一下原始的数据:
+IMDB review sentiment analysis task is to classify the content of movie reviews to determine whether the review is a positive review or a negative review.
+First let's take a look at the raw data:
```
saw a trailer for this on another video, and decided to rent when it came out. boy, was i disappointed! the story is extremely boring, the acting (aside from christopher walken) is bad, and i couldn't care less about the characters, aside from really wanting to see nora's husband get thrashed. christopher walken's role is such a throw-away, what a tease! | 0
```
-这是一条英文评论样本,样本中使用|作为分隔符,分隔符之前为评论的内容,分隔符之后是样本的标签,0代表负样本,即负面评论,1代表正样本,即正面评论。
+This is an English comment sample. The sample uses | as the separator: the content of the comment comes before the separator and the label comes after it, where 0 denotes a negative review and 1 denotes a positive review.
-## Step3:定义Reader,划分训练集、测试集
+## Step3: Define the Reader and Split Training and Test Sets
-对于原始文本我们需要将它转化为神经网络可以使用的数字id。imdb_reader.py脚本中定义了文本id化的方法,通过词典文件imdb.vocab将单词映射为整形数。
+The original text needs to be converted into numeric ids that the neural network can use. The imdb_reader.py script defines how the text is converted to ids: words are mapped to integers through the dictionary file imdb.vocab.
imdb_reader.py
@@ -102,17 +102,17 @@ class IMDBDataset(dg.MultiSlotDataGenerator):
```
-映射之后的样本类似于以下的格式:
+The sample after mapping is similar to the following format:
```
257 142 52 898 7 0 12899 1083 824 122 89527 134 6 65 47 48 904 89527 13 0 87 170 8 248 9 15 4 25 1365 4360 89527 702 89527 1 89527 240 3 28 89527 19 7 0 216 219 614 89527 0 84 89527 225 3 0 15 67 2356 89527 0 498 117 2 314 282 7 38 1097 89527 1 0 174 181 38 11 71 198 44 1 3110 89527 454 89527 34 37 89527 0 15 5912 80 2 9856 7748 89527 8 421 80 9 15 14 55 2218 12 4 45 6 58 25 89527 154 119 224 41 0 151 89527 871 89527 505 89527 501 89527 29 2 773 211 89527 54 307 90 0 893 89527 9 407 4 25 2 614 15 46 89527 89527 71 8 1356 35 89527 12 0 89527 89527 89 527 577 374 3 39091 22950 1 3771 48900 95 371 156 313 89527 37 154 296 4 25 2 217 169 3 2759 7 0 15 89527 0 714 580 11 2094 559 34 0 84 539 89527 1 0 330 355 3 0 15 15607 935 80 0 5369 3 0 622 89527 2 15 36 9 2291 2 7599 6968 2449 89527 1 454 37 256 2 211 113 0 480 218 1152 700 4 1684 1253 352 10 2449 89527 39 4 1819 129 1 316 462 29 0 12957 3 6 28 89527 13 0 457 8952 7 225 89527 8 2389 0 1514 89527 1
```
-这样神经网络就可以将转化后的文本信息作为特征值进行训练。
+In this way, the neural network can train the transformed text information as feature values.
-## Step4:定义CNN网络进行训练并保存
+## Step4: Define a CNN Network, Train and Save the Model
-接下来我们使用[CNN模型](https://www.paddlepaddle.org.cn/documentation/docs/zh/user_guides/nlp_case/understand_sentiment/README.cn.html#cnn)来进行训练。在nets.py脚本中定义网络结构。
+Next we use the [CNN Model](https://www.paddlepaddle.org.cn/documentation/docs/zh/user_guides/nlp_case/understand_sentiment/README.cn.html#cnn) for training. The network structure is defined in nets.py.
nets.py
@@ -156,7 +156,7 @@ def cnn_net(data,
-使用训练样本进行训练,训练脚本为local_train.py。在训练结束后使用paddle_serving_client.io.save_model函数来保存部署预测服务使用的模型文件和配置文件。
+Use the training dataset for training; the training script is local_train.py. After training, use the paddle_serving_client.io.save_model function to save the model files and configuration files used by the serving deployment.
local_train.py
@@ -172,7 +172,7 @@ logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
-# 加载词典文件
+# load dict file
def load_vocab(filename):
vocab = {}
with open(filename) as f:
@@ -190,11 +190,11 @@ if __name__ == "__main__":
vocab = load_vocab('imdb.vocab')
dict_dim = len(vocab)
- #定义模型输入
+ #define model input
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
- #定义dataset,train_data为训练数据目录
+ #define dataset,train_data is the dataset directory
dataset = fluid.DatasetFactory().create_dataset()
filelist = ["train_data/%s" % x for x in os.listdir("train_data")]
dataset.set_use_var([data, label])
@@ -203,11 +203,11 @@ if __name__ == "__main__":
dataset.set_batch_size(4)
dataset.set_filelist(filelist)
dataset.set_thread(10)
- #定义模型
+ #define model
avg_cost, acc, prediction = cnn_net(data, label, dict_dim)
optimizer = fluid.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_cost)
- #执行训练
+ #execute training
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
epochs = 100
@@ -219,7 +219,7 @@ if __name__ == "__main__":
program=fluid.default_main_program(), dataset=dataset, debug=False)
logger.info("TRAIN --> pass: {}".format(i))
if i == 64:
- #在训练结束时使用PaddleServing中的模型保存接口保存出Serving所需的模型和配置文件
+ #At the end of training, use the model save interface in PaddleServing to save the models and configuration files required by Serving
serving_io.save_model("{}_model".format(model_name),
"{}_client_conf".format(model_name),
{"words": data}, {"prediction": prediction},
@@ -228,32 +228,32 @@ if __name__ == "__main__":
-![训练过程](./imdb_loss.png)由上图可以看出模型的损失在第65轮之后开始收敛,我们在第65轮训练完成后保存模型和配置文件。保存的文件分为imdb_cnn_client_conf和imdb_cnn_model文件夹,前者包含client端的配置文件,后者包含server端的配置文件和保存的模型文件。
-save_model函数的参数列表如下:
+![Training process](./imdb_loss.png) As can be seen from the above figure, the loss of the model starts to converge after the 65th round. We save the model and configuration file after the 65th round of training is completed. The saved files are divided into imdb_cnn_client_conf and imdb_cnn_model folders. The former contains client-side configuration files, and the latter contains server-side configuration files and saved model files.
+The parameter list of the save_model function is as follows:
-| 参数 | 含义 |
+| Parameter | Meaning |
| -------------------- | ------------------------------------------------------------ |
-| server_model_folder | 保存server端配置文件和模型文件的目录 |
-| client_config_folder | 保存client端配置文件的目录 |
-| feed_var_dict | 用于预测的模型的输入,dict类型,key可以自定义,value为模型中的input variable,每个key对应一个variable,使用预测服务时,输入数据使用key作为输入的名称 |
-| fetch_var_dict | 用于预测的模型的输出,dict类型,key可以自定义,value为模型中的input variable,每个key对应一个variable,使用预测服务时,通过key来获取返回数据 |
-| main_program | 模型的program |
+| server_model_folder | Directory for server-side configuration files and model files |
+| client_config_folder | Directory for saving client configuration files |
+| feed_var_dict | The input of the inference model. The dict type and key can be customized. The value is the input variable in the model. Each key corresponds to a variable. When using the prediction service, the input data uses the key as the input name. |
+| fetch_var_dict | The output of the model used for prediction, dict type, key can be customized, value is the input variable in the model, and each key corresponds to a variable. When using the prediction service, use the key to get the returned data |
+| main_program | Model's program |
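+
+For reference, a save_model call that matches the parameters above might look like the following sketch; `data` and `prediction` are the fluid variables defined in local_train.py, and the folder names are the ones used in this example:
+
+``` python
+import paddle_serving_client.io as serving_io
+
+serving_io.save_model("imdb_cnn_model",            # server_model_folder
+                      "imdb_cnn_client_conf",      # client_config_folder
+                      {"words": data},             # feed_var_dict
+                      {"prediction": prediction},  # fetch_var_dict
+                      fluid.default_main_program())  # main_program
+```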
-## Step5:部署RPC预测服务
+## Step5: Deploy RPC Prediction Service
-Paddle Serving框架支持两种预测服务方式,一种是通过RPC进行通信,一种是通过HTTP进行通信,下面将先介绍RPC预测服务的部署和使用方法,在Step8开始介绍HTTP预测服务的部署和使用。
+The Paddle Serving framework supports two types of prediction service methods: one communicates through RPC and the other through HTTP. The deployment and use of the RPC prediction service is introduced first; the deployment and use of the HTTP prediction service is introduced in Step 8.
-```shell
-python -m paddle_serving_server.serve --model imdb_cnn_model/ --port 9292 #cpu预测服务
-python -m paddle_serving_server_gpu.serve --model imdb_cnn_model/ --port 9292 --gpu_ids 0 #gpu预测服务
-```
+```shell
+python -m paddle_serving_server.serve --model imdb_cnn_model/ --port 9292 #cpu prediction service
+python -m paddle_serving_server_gpu.serve --model imdb_cnn_model/ --port 9292 --gpu_ids 0 #gpu prediction service
+```
-命令中参数--model 指定在之前保存的server端的模型和配置文件目录,--port指定预测服务的端口,当使用gpu版本部署gpu预测服务时可以使用--gpu_ids指定使用的gpu 。
+The parameter --model in the command specifies the directory of the server-side model and configuration files saved previously, and --port specifies the port of the prediction service. When deploying the gpu prediction service with the gpu version, you can use --gpu_ids to specify which gpus to use.
-执行完以上命令之一,就完成了IMDB 情感分析任务的RPC预测服务部署。
+After executing one of the above commands, the RPC prediction service deployment of the IMDB sentiment analysis task is completed.
-## Step6:复用Reader,定义远程RPC客户端
-下面我们通过Python代码来访问RPC预测服务,脚本为test_client.py
+## Step6: Reuse Reader, define remote RPC client
+Below we access the RPC prediction service through Python code; the script is test_client.py
test_client.py
@@ -267,7 +267,7 @@ client = Client()
client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9292"])
-#在这里复用了数据预处理部分的代码将原始文本转换成数字id
+#The code of the data preprocessing part is reused here to convert the original text into a numeric id
imdb_dataset = IMDBDataset()
imdb_dataset.load_resource(sys.argv[2])
@@ -281,30 +281,29 @@ for line in sys.stdin:
-脚本从标准输入接收数据,并打印出样本预测为1的概率与真实的label。
+The script receives data from standard input and prints out the predicted probability that the sample is positive (label 1) together with its real label.
-## Step7:调用RPC服务,测试模型效果
+## Step7: Call the RPC service to test the model effect
-以上一步实现的客户端为例运行预测服务,使用方式如下:
-
-```shell
-cat test_data/part-0 | python test_client.py imdb_lstm_client_conf/serving_client_conf.prototxt imdb.vocab
-```
+Taking the client implemented in the previous step as an example, run it against the prediction service as follows:
-使用test_data/part-0文件中的2084个样本进行测试测试,模型预测的准确率为88.19%。
+```shell
+cat test_data/part-0 | python test_client.py imdb_lstm_client_conf/serving_client_conf.prototxt imdb.vocab
+```
-**注意**:每次模型训练的效果可能略有不同,使用训练出的模型预测的准确率会与示例中接近但有可能不完全一致。
+Testing with the 2084 samples in the test_data/part-0 file, the model prediction accuracy is 88.19%.
-## Step8:部署HTTP预测服务
+**Note**: The result of each training run may be slightly different, so the accuracy of the trained model will be close to the example but may not be exactly the same.
-使用HTTP预测服务时,client端不需要安装Paddle Serving的任何模块,仅需要能发送HTTP请求即可。当然HTTP的通信方式会相较于RPC的通信方式在通信阶段消耗更多的时间。
+## Step8: Deploy HTTP Prediction Service
-对于IMDB情感分析任务原始文本在预测之前需要进行预处理,在RPC预测服务中我们将预处理放在client的脚本中,而在HTTP预测服务中我们将预处理放在server端。Paddle Serving的HTTP预测服务框架为这种情况准备了数据预处理和后处理的接口,我们只要根据任务需要重写即可。
+When using the HTTP prediction service, the client does not need to install any modules of Paddle Serving; it only needs to be able to send HTTP requests. Of course, the HTTP method consumes more time in the communication phase than the RPC method.
-Serving提供了示例代码,通过执行[IMDB示例](https://github.com/PaddlePaddle/Serving/tree/develop/python/examples/imdb)中的imdb_web_service_demo.sh脚本来获取。
+For the IMDB sentiment analysis task, the original text needs to be preprocessed before prediction. In the RPC prediction service, we put the preprocessing in the client's script, and in the HTTP prediction service, we put the preprocessing on the server. Paddle Serving's HTTP prediction service framework provides data pre-processing and post-processing interfaces for this situation; we only need to override them according to the needs of the task.
-下面我们来看一下启动HTTP预测服务的脚本text_classify_service.py。
+Serving provides sample code, which is obtained by executing the imdb_web_service_demo.sh script in [IMDB Example](https://github.com/PaddlePaddle/Serving/tree/develop/python/examples/imdb).
+Let's take a look at the script text_classify_service.py that starts the HTTP prediction service.
text_clssify_service.py
@@ -313,7 +312,7 @@ from paddle_serving_server.web_service import WebService
from imdb_reader import IMDBDataset
import sys
-#继承框架中的WebService类
+#extend class WebService
class IMDBService(WebService):
def prepare_dict(self, args={}):
if len(args) == 0:
@@ -321,7 +320,7 @@ class IMDBService(WebService):
self.dataset = IMDBDataset()
self.dataset.load_resource(args["dict_file_path"])
- #重写preprocess方法来实现数据预处理,这里也复用了训练时使用的reader脚本
+ #rewrite preprocess() to implement data preprocessing, here we reuse reader script for training
def preprocess(self, feed={}, fetch=[]):
if "words" not in feed:
exit(-1)
@@ -329,7 +328,7 @@ class IMDBService(WebService):
res_feed["words"] = self.dataset.get_words_only(feed["words"])[0]
return res_feed, fetch
-#这里需要使用name参数指定预测服务的名称,
+#Here you need to use the name parameter to specify the name of the prediction service.
imdb_service = IMDBService(name="imdb")
imdb_service.load_model_config(sys.argv[1])
imdb_service.prepare_server(
@@ -339,24 +338,24 @@ imdb_service.run_server()
```
-启动命令
+Launch command:
```shell
python text_classify_service.py imdb_cnn_model/ workdir/ 9292 imdb.vocab
```
-以上命令中参数1为保存的server端模型和配置文件,参数2为工作目录会保存一些预测服务工作时的配置文件,该目录可以不存在但需要指定名称,预测服务会自行创建,参数3为端口号,参数4为词典文件。
+In the above command, the first parameter is the saved server-side model and configuration files, and the second parameter is the working directory, which stores some configuration files used while the prediction service is running; the directory does not need to exist in advance, but a name must be specified and the prediction service will create it. The third parameter is the port number and the fourth parameter is the dictionary file.
-## Step9:明文数据调用预测服务
-启动完HTTP预测服务,即可通过一行命令进行预测:
+## Step9: Call the prediction service with plaintext data
+After starting the HTTP prediction service, you can make a prediction with a single command:
-```
-curl -H "Content-Type:application/json" -X POST -d '{"words": "i am very sad | 0", "fetch":["prediction"]}' http://127.0.0.1:9292/imdb/prediction
-```
-预测流程正常时,会返回预测概率,示例如下。
+```
+curl -H "Content-Type:application/json" -X POST -d '{"words": "i am very sad | 0", "fetch":["prediction"]}' http://127.0.0.1:9292/imdb/prediction
+```
+When the inference process is normal, the prediction probability is returned, as shown below.
-```
-{"prediction":[0.5592559576034546,0.44074398279190063]}
-```
+```
+{"prediction":[0.5592559576034546,0.44074398279190063]}
+```
-**注意**:每次模型训练的效果可能略有不同,使用训练出的模型预测概率数值可能与示例不一致。
+**Note**: The result of each training run may be slightly different, so the probability values inferred with the trained model may not be exactly the same as in this example.
diff --git a/doc/TRAIN_TO_SERVICE_CN.md b/doc/TRAIN_TO_SERVICE_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..8349723fb3a749efcbcc5887ff5f7ba1ede7ad65
--- /dev/null
+++ b/doc/TRAIN_TO_SERVICE_CN.md
@@ -0,0 +1,364 @@
+# 端到端完成从训练到部署全流程
+
+(简体中文|[English](./TRAIN_TO_SERVICE.md))
+
+Paddle Serving是Paddle的高性能在线预测服务框架,可以灵活支持大多数模型的部署。本文中将以IMDB评论情感分析任务为例通过9步展示从模型的训练到部署预测服务的全流程。
+
+## Step1:准备环境
+
+Paddle Serving可以部署在Centos和Ubuntu等Linux环境上,在其他系统上或者不希望安装serving模块的环境中仍然可以通过http服务来访问server端的预测服务。
+
+可以根据需求和机器环境来选择安装cpu或gpu版本的server模块,在client端机器上安装client模块。当仅希望通过http来访问server端时,可以不安装client模块。
+
+```shell
+pip install paddle_serving_server #cpu版本server端
+pip install paddle_serving_server_gpu #gpu版本server端
+pip install paddle_serving_client #client端
+```
+
+简单准备后,我们将以IMDB评论情感分析任务为例,展示从模型训练到部署预测服务的流程。示例中的所有代码都可以在Paddle Serving代码库的[IMDB示例](https://github.com/PaddlePaddle/Serving/tree/develop/python/examples/imdb)中找到,示例中使用的数据和词典文件可以通过执行IMDB示例代码中的get_data.sh脚本得到。
+
+## Step2:确定任务和原始数据格式
+
+IMDB评论情感分析任务是对电影评论的内容进行二分类,判断该评论是属于正面评论还是负面评论。
+
+首先我们来看一下原始的数据:
+
+```
+saw a trailer for this on another video, and decided to rent when it came out. boy, was i disappointed! the story is extremely boring, the acting (aside from christopher walken) is bad, and i couldn't care less about the characters, aside from really wanting to see nora's husband get thrashed. christopher walken's role is such a throw-away, what a tease! | 0
+```
+
+这是一条英文评论样本,样本中使用|作为分隔符,分隔符之前为评论的内容,分隔符之后是样本的标签,0代表负样本,即负面评论,1代表正样本,即正面评论。
+
+## Step3:定义Reader,划分训练集、测试集
+
+对于原始文本我们需要将它转化为神经网络可以使用的数字id。imdb_reader.py脚本中定义了文本id化的方法,通过词典文件imdb.vocab将单词映射为整形数。
+
+
+ imdb_reader.py
+
+```python
+import sys
+import os
+import paddle
+import re
+import paddle.fluid.incubate.data_generator as dg
+
+
+class IMDBDataset(dg.MultiSlotDataGenerator):
+    def load_resource(self, dictfile):
+        self._vocab = {}
+        wid = 0
+        with open(dictfile) as f:
+            for line in f:
+                self._vocab[line.strip()] = wid
+                wid += 1
+        self._unk_id = len(self._vocab)
+        self._pattern = re.compile(r'(;|,|\.|\?|!|\s|\(|\))')
+        self.return_value = ("words", [1, 2, 3, 4, 5, 6]), ("label", [0])
+
+    def get_words_only(self, line):
+        sent = line.lower().replace("<br />", " ").strip()
+        words = [x for x in self._pattern.split(sent) if x and x != " "]
+        feas = [
+            self._vocab[x] if x in self._vocab else self._unk_id for x in words
+        ]
+        return feas
+
+    def get_words_and_label(self, line):
+        send = '|'.join(line.split('|')[:-1]).lower().replace("<br />",
+                                                              " ").strip()
+        label = [int(line.split('|')[-1])]
+
+        words = [x for x in self._pattern.split(send) if x and x != " "]
+        feas = [
+            self._vocab[x] if x in self._vocab else self._unk_id for x in words
+        ]
+        return feas, label
+
+    def infer_reader(self, infer_filelist, batch, buf_size):
+        def local_iter():
+            for fname in infer_filelist:
+                with open(fname, "r") as fin:
+                    for line in fin:
+                        feas, label = self.get_words_and_label(line)
+                        yield feas, label
+
+        import paddle
+        batch_iter = paddle.batch(
+            paddle.reader.shuffle(
+                local_iter, buf_size=buf_size),
+            batch_size=batch)
+        return batch_iter
+
+    def generate_sample(self, line):
+        def memory_iter():
+            for i in range(1000):
+                yield self.return_value
+
+        def data_iter():
+            feas, label = self.get_words_and_label(line)
+            yield ("words", feas), ("label", label)
+
+        return data_iter
+```
+
+
+映射之后的样本类似于以下的格式:
+
+```
+257 142 52 898 7 0 12899 1083 824 122 89527 134 6 65 47 48 904 89527 13 0 87 170 8 248 9 15 4 25 1365 4360 89527 702 89527 1 89527 240 3 28 89527 19 7 0 216 219 614 89527 0 84 89527 225 3 0 15 67 2356 89527 0 498 117 2 314 282 7 38 1097 89527 1 0 174 181 38 11 71 198 44 1 3110 89527 454 89527 34 37 89527 0 15 5912 80 2 9856 7748 89527 8 421 80 9 15 14 55 2218 12 4 45 6 58 25 89527 154 119 224 41 0 151 89527 871 89527 505 89527 501 89527 29 2 773 211 89527 54 307 90 0 893 89527 9 407 4 25 2 614 15 46 89527 89527 71 8 1356 35 89527 12 0 89527 89527 89 527 577 374 3 39091 22950 1 3771 48900 95 371 156 313 89527 37 154 296 4 25 2 217 169 3 2759 7 0 15 89527 0 714 580 11 2094 559 34 0 84 539 89527 1 0 330 355 3 0 15 15607 935 80 0 5369 3 0 622 89527 2 15 36 9 2291 2 7599 6968 2449 89527 1 454 37 256 2 211 113 0 480 218 1152 700 4 1684 1253 352 10 2449 89527 39 4 1819 129 1 316 462 29 0 12957 3 6 28 89527 13 0 457 8952 7 225 89527 8 2389 0 1514 89527 1
+```
+
+这样神经网络就可以将转化后的文本信息作为特征值进行训练。
+
+## Step4:定义CNN网络进行训练并保存
+
+接下来我们使用[CNN模型](https://www.paddlepaddle.org.cn/documentation/docs/zh/user_guides/nlp_case/understand_sentiment/README.cn.html#cnn)来进行训练。在nets.py脚本中定义网络结构。
+
+
+ nets.py
+
+```python
+import sys
+import time
+import numpy as np
+
+import paddle
+import paddle.fluid as fluid
+
+def cnn_net(data,
+            label,
+            dict_dim,
+            emb_dim=128,
+            hid_dim=128,
+            hid_dim2=96,
+            class_dim=2,
+            win_size=3):
+    """ conv net. """
+    emb = fluid.layers.embedding(
+        input=data, size=[dict_dim, emb_dim], is_sparse=True)
+
+    conv_3 = fluid.nets.sequence_conv_pool(
+        input=emb,
+        num_filters=hid_dim,
+        filter_size=win_size,
+        act="tanh",
+        pool_type="max")
+
+    fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2)
+
+    prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act="softmax")
+    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_cost = fluid.layers.mean(x=cost)
+    acc = fluid.layers.accuracy(input=prediction, label=label)
+
+    return avg_cost, acc, prediction
+```
+
+
+
+使用训练样本进行训练,训练脚本为local_train.py。在训练结束后使用paddle_serving_client.io.save_model函数来保存部署预测服务使用的模型文件和配置文件。
+
+
+ local_train.py
+
+```python
+import os
+import sys
+import paddle
+import logging
+import paddle.fluid as fluid
+
+logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
+logger = logging.getLogger("fluid")
+logger.setLevel(logging.INFO)
+
+# 加载词典文件
+def load_vocab(filename):
+    vocab = {}
+    with open(filename) as f:
+        wid = 0
+        for line in f:
+            vocab[line.strip()] = wid
+            wid += 1
+    vocab["<unk>"] = len(vocab)
+    return vocab
+
+
+if __name__ == "__main__":
+    from nets import cnn_net
+    model_name = "imdb_cnn"
+    vocab = load_vocab('imdb.vocab')
+    dict_dim = len(vocab)
+
+    #定义模型输入
+    data = fluid.layers.data(
+        name="words", shape=[1], dtype="int64", lod_level=1)
+    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+    #定义dataset,train_data为训练数据目录
+    dataset = fluid.DatasetFactory().create_dataset()
+    filelist = ["train_data/%s" % x for x in os.listdir("train_data")]
+    dataset.set_use_var([data, label])
+    pipe_command = "python imdb_reader.py"
+    dataset.set_pipe_command(pipe_command)
+    dataset.set_batch_size(4)
+    dataset.set_filelist(filelist)
+    dataset.set_thread(10)
+    #定义模型
+    avg_cost, acc, prediction = cnn_net(data, label, dict_dim)
+    optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+    optimizer.minimize(avg_cost)
+    #执行训练
+    exe = fluid.Executor(fluid.CPUPlace())
+    exe.run(fluid.default_startup_program())
+    epochs = 100
+
+    import paddle_serving_client.io as serving_io
+
+    for i in range(epochs):
+        exe.train_from_dataset(
+            program=fluid.default_main_program(), dataset=dataset, debug=False)
+        logger.info("TRAIN --> pass: {}".format(i))
+        if i == 64:
+            #在训练结束时使用PaddleServing中的模型保存接口保存出Serving所需的模型和配置文件
+            serving_io.save_model("{}_model".format(model_name),
+                                  "{}_client_conf".format(model_name),
+                                  {"words": data}, {"prediction": prediction},
+                                  fluid.default_main_program())
+```
+
+
+
+![训练过程](./imdb_loss.png)由上图可以看出模型的损失在第65轮之后开始收敛,我们在第65轮训练完成后保存模型和配置文件。保存的文件分为imdb_cnn_client_conf和imdb_cnn_model文件夹,前者包含client端的配置文件,后者包含server端的配置文件和保存的模型文件。
+save_model函数的参数列表如下:
+
+| 参数 | 含义 |
+| -------------------- | ------------------------------------------------------------ |
+| server_model_folder | 保存server端配置文件和模型文件的目录 |
+| client_config_folder | 保存client端配置文件的目录 |
+| feed_var_dict | 用于预测的模型的输入,dict类型,key可以自定义,value为模型中的input variable,每个key对应一个variable,使用预测服务时,输入数据使用key作为输入的名称 |
+| fetch_var_dict | 用于预测的模型的输出,dict类型,key可以自定义,value为模型中的input variable,每个key对应一个variable,使用预测服务时,通过key来获取返回数据 |
+| main_program | 模型的program |
+
+## Step5:部署RPC预测服务
+
+Paddle Serving框架支持两种预测服务方式,一种是通过RPC进行通信,一种是通过HTTP进行通信,下面将先介绍RPC预测服务的部署和使用方法,在Step8开始介绍HTTP预测服务的部署和使用。
+
+```shell
+python -m paddle_serving_server.serve --model imdb_cnn_model/ --port 9292 #cpu预测服务
+python -m paddle_serving_server_gpu.serve --model imdb_cnn_model/ --port 9292 --gpu_ids 0 #gpu预测服务
+```
+
+命令中参数--model 指定在之前保存的server端的模型和配置文件目录,--port指定预测服务的端口,当使用gpu版本部署gpu预测服务时可以使用--gpu_ids指定使用的gpu 。
+
+执行完以上命令之一,就完成了IMDB 情感分析任务的RPC预测服务部署。
+
+## Step6:复用Reader,定义远程RPC客户端
+下面我们通过Python代码来访问RPC预测服务,脚本为test_client.py
+
+
+ test_client.py
+
+```python
+from paddle_serving_client import Client
+from imdb_reader import IMDBDataset
+import sys
+
+client = Client()
+client.load_client_config(sys.argv[1])
+client.connect(["127.0.0.1:9292"])
+
+#在这里复用了数据预处理部分的代码将原始文本转换成数字id
+imdb_dataset = IMDBDataset()
+imdb_dataset.load_resource(sys.argv[2])
+
+for line in sys.stdin:
+    word_ids, label = imdb_dataset.get_words_and_label(line)
+    feed = {"words": word_ids}
+    fetch = ["acc", "cost", "prediction"]
+    fetch_map = client.predict(feed=feed, fetch=fetch)
+    print("{} {}".format(fetch_map["prediction"][1], label[0]))
+```
+
+
+
+脚本从标准输入接收数据,并打印出样本预测为1的概率与真实的label。
+
+## Step7:调用RPC服务,测试模型效果
+
+以上一步实现的客户端为例运行预测服务,使用方式如下:
+
+```shell
+cat test_data/part-0 | python test_client.py imdb_lstm_client_conf/serving_client_conf.prototxt imdb.vocab
+```
+
+使用test_data/part-0文件中的2084个样本进行测试测试,模型预测的准确率为88.19%。
+
+**注意**:每次模型训练的效果可能略有不同,使用训练出的模型预测的准确率会与示例中接近但有可能不完全一致。
+
+## Step8:部署HTTP预测服务
+
+使用HTTP预测服务时,client端不需要安装Paddle Serving的任何模块,仅需要能发送HTTP请求即可。当然HTTP的通信方式会相较于RPC的通信方式在通信阶段消耗更多的时间。
+
+对于IMDB情感分析任务原始文本在预测之前需要进行预处理,在RPC预测服务中我们将预处理放在client的脚本中,而在HTTP预测服务中我们将预处理放在server端。Paddle Serving的HTTP预测服务框架为这种情况准备了数据预处理和后处理的接口,我们只要根据任务需要重写即可。
+
+Serving提供了示例代码,通过执行[IMDB示例](https://github.com/PaddlePaddle/Serving/tree/develop/python/examples/imdb)中的imdb_web_service_demo.sh脚本来获取。
+
+下面我们来看一下启动HTTP预测服务的脚本text_classify_service.py。
+
+
+ text_classify_service.py
+
+```python
+from paddle_serving_server.web_service import WebService
+from imdb_reader import IMDBDataset
+import sys
+
+#继承框架中的WebService类
+class IMDBService(WebService):
+    def prepare_dict(self, args={}):
+        if len(args) == 0:
+            exit(-1)
+        self.dataset = IMDBDataset()
+        self.dataset.load_resource(args["dict_file_path"])
+
+    #重写preprocess方法来实现数据预处理,这里也复用了训练时使用的reader脚本
+    def preprocess(self, feed={}, fetch=[]):
+        if "words" not in feed:
+            exit(-1)
+        res_feed = {}
+        res_feed["words"] = self.dataset.get_words_only(feed["words"])[0]
+        return res_feed, fetch
+
+#这里需要使用name参数指定预测服务的名称,
+imdb_service = IMDBService(name="imdb")
+imdb_service.load_model_config(sys.argv[1])
+imdb_service.prepare_server(
+    workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
+imdb_service.prepare_dict({"dict_file_path": sys.argv[4]})
+imdb_service.run_server()
+```
+
+
+启动命令
+
+```shell
+python text_classify_service.py imdb_cnn_model/ workdir/ 9292 imdb.vocab
+```
+
+以上命令中参数1为保存的server端模型和配置文件,参数2为工作目录会保存一些预测服务工作时的配置文件,该目录可以不存在但需要指定名称,预测服务会自行创建,参数3为端口号,参数4为词典文件。
+
+## Step9:明文数据调用预测服务
+启动完HTTP预测服务,即可通过一行命令进行预测:
+
+```
+curl -H "Content-Type:application/json" -X POST -d '{"words": "i am very sad | 0", "fetch":["prediction"]}' http://127.0.0.1:9292/imdb/prediction
+```
+预测流程正常时,会返回预测概率,示例如下。
+
+```
+{"prediction":[0.5592559576034546,0.44074398279190063]}
+```
+
+**注意**:每次模型训练的效果可能略有不同,使用训练出的模型预测概率数值可能与示例不一致。
diff --git a/doc/BENCHMARKING.md b/doc/deprecated/BENCHMARKING.md
similarity index 100%
rename from doc/BENCHMARKING.md
rename to doc/deprecated/BENCHMARKING.md
diff --git a/doc/CLIENT_CONFIGURE.md b/doc/deprecated/CLIENT_CONFIGURE.md
similarity index 100%
rename from doc/CLIENT_CONFIGURE.md
rename to doc/deprecated/CLIENT_CONFIGURE.md
diff --git a/doc/CLUSTERING.md b/doc/deprecated/CLUSTERING.md
similarity index 100%
rename from doc/CLUSTERING.md
rename to doc/deprecated/CLUSTERING.md
diff --git a/doc/CREATING.md b/doc/deprecated/CREATING.md
similarity index 100%
rename from doc/CREATING.md
rename to doc/deprecated/CREATING.md
diff --git a/doc/CTR_PREDICTION.md b/doc/deprecated/CTR_PREDICTION.md
similarity index 100%
rename from doc/CTR_PREDICTION.md
rename to doc/deprecated/CTR_PREDICTION.md
diff --git a/doc/FAQ.md b/doc/deprecated/FAQ.md
similarity index 100%
rename from doc/FAQ.md
rename to doc/deprecated/FAQ.md
diff --git a/doc/GETTING_STARTED.md b/doc/deprecated/GETTING_STARTED.md
similarity index 100%
rename from doc/GETTING_STARTED.md
rename to doc/deprecated/GETTING_STARTED.md
diff --git a/doc/HTTP_INTERFACE.md b/doc/deprecated/HTTP_INTERFACE.md
similarity index 100%
rename from doc/HTTP_INTERFACE.md
rename to doc/deprecated/HTTP_INTERFACE.md
diff --git a/doc/INDEX.md b/doc/deprecated/INDEX.md
similarity index 100%
rename from doc/INDEX.md
rename to doc/deprecated/INDEX.md
diff --git a/doc/MULTI_SERVING_OVER_SINGLE_GPU_CARD.md b/doc/deprecated/MULTI_SERVING_OVER_SINGLE_GPU_CARD.md
similarity index 100%
rename from doc/MULTI_SERVING_OVER_SINGLE_GPU_CARD.md
rename to doc/deprecated/MULTI_SERVING_OVER_SINGLE_GPU_CARD.md
diff --git a/doc/SERVING_CONFIGURE.md b/doc/deprecated/SERVING_CONFIGURE.md
similarity index 100%
rename from doc/SERVING_CONFIGURE.md
rename to doc/deprecated/SERVING_CONFIGURE.md