diff --git a/README.md b/README.md
index fb537b65db83d013f570c8208f21c219ca5084a3..5305056a737c60dcf83812e45f87d33eeb769155 100644
--- a/README.md
+++ b/README.md
@@ -55,11 +55,13 @@ You may need to use a domestic mirror source (in China, you can use the Tsinghua
If you need install modules compiled with develop branch, please download packages from [latest packages list](./doc/LATEST_PACKAGES.md) and install with `pip install` command.
-Packages of paddle-serving-server and paddle-serving-server-gpu support Centos 6/7 and Ubuntu 16/18.
+Packages of paddle-serving-server and paddle-serving-server-gpu support Centos 6/7, Ubuntu 16/18, Windows 10.
Packages of paddle-serving-client and paddle-serving-app support Linux and Windows, but paddle-serving-client only support python2.7/3.6/3.7.
-Recommended to install paddle >= 1.8.2.
+Recommended to install paddle >= 1.8.4.
+
+For **Windows Users**, please read the document [Paddle Serving for Windows Users](./doc/WINDOWS_TUTORIAL.md)
Pre-built services with Paddle Serving
diff --git a/README_CN.md b/README_CN.md
index 2c37a26681d4291adcf7e8e70d3392772fabbe6b..d1627c23b68e242f0fc79214dff578d47b589cbd 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -57,11 +57,13 @@ pip install paddle-serving-server-gpu==0.3.2.post10 # GPU with CUDA10.0
如果需要使用develop分支编译的安装包,请从[最新安装包列表](./doc/LATEST_PACKAGES.md)中获取下载地址进行下载,使用`pip install`命令进行安装。
-paddle-serving-server和paddle-serving-server-gpu安装包支持Centos 6/7和Ubuntu 16/18。
+paddle-serving-server和paddle-serving-server-gpu安装包支持Centos 6/7, Ubuntu 16/18和Windows 10。
paddle-serving-client和paddle-serving-app安装包支持Linux和Windows,其中paddle-serving-client仅支持python2.7/3.5/3.6。
-推荐安装1.8.2及以上版本的paddle
+推荐安装1.8.4及以上版本的paddle
+
+对于**Windows 10 用户**,请参考文档[Windows平台使用Paddle Serving指导](./doc/WINDOWS_TUTORIAL_CN.md)。
Paddle Serving预装的服务
diff --git a/cmake/paddlepaddle.cmake b/cmake/paddlepaddle.cmake
index 4b7d3ed1f620bfcd2e1e214c49c57ee3848129e7..15076c15961e96317bf31647b1b64c6fee5ebd7d 100644
--- a/cmake/paddlepaddle.cmake
+++ b/cmake/paddlepaddle.cmake
@@ -114,7 +114,7 @@ ADD_LIBRARY(openblas STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET openblas PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/openblas/lib/libopenblas.a)
ADD_LIBRARY(paddle_fluid SHARED IMPORTED GLOBAL)
-SET_PROPERTY(TARGET paddle_fluid PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/lib/libpaddle_fluid.so)
+SET_PROPERTY(TARGET paddle_fluid PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/lib/libpaddle_fluid.a)
if (WITH_TRT)
ADD_LIBRARY(nvinfer SHARED IMPORTED GLOBAL)
@@ -127,12 +127,17 @@ endif()
ADD_LIBRARY(xxhash STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET xxhash PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/xxhash/lib/libxxhash.a)
+ADD_LIBRARY(cryptopp STATIC IMPORTED GLOBAL)
+SET_PROPERTY(TARGET cryptopp PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/cryptopp/lib/libcryptopp.a)
+
LIST(APPEND external_project_dependencies paddle)
LIST(APPEND paddle_depend_libs
- xxhash)
+ xxhash cryptopp)
+
if(WITH_TRT)
LIST(APPEND paddle_depend_libs
nvinfer nvinfer_plugin)
endif()
+
diff --git a/core/predictor/tools/seq_generator.cpp b/core/predictor/tools/seq_generator.cpp
index eb7e7ed7f9a609e0c21be9a2c3d686dd7d9a1abd..c667445c7cf380a0f085eaeec24f5201b3445c73 100644
--- a/core/predictor/tools/seq_generator.cpp
+++ b/core/predictor/tools/seq_generator.cpp
@@ -17,11 +17,11 @@
#include
#include
#include
-#include
+#include //NOLINT
#include "core/predictor/framework.pb.h"
-#include "quant.h"
-#include "seq_file.h"
+#include "quant.h" // NOLINT
+#include "seq_file.h" // NOLINT
inline uint64_t time_diff(const struct timeval &start_time,
const struct timeval &end_time) {
@@ -113,13 +113,15 @@ int dump_parameter(const char *input_file, const char *output_file) {
// std::cout << "key_len " << key_len << " value_len " << value_buf_len
// << std::endl;
memcpy(value_buf, tensor_buf + offset, value_buf_len);
- seq_file_writer.write((char *)&i, sizeof(i), value_buf, value_buf_len);
+ seq_file_writer.write(
+ std::to_string(i).c_str(), sizeof(i), value_buf, value_buf_len);
offset += value_buf_len;
}
return 0;
}
-float *read_embedding_table(const char *file1, std::vector &dims) {
+float *read_embedding_table(const char *file1,
+                            std::vector<int64_t> &dims) {  // NOLINT
std::ifstream is(file1);
// Step 1: is read version, os write version
uint32_t version;
@@ -242,7 +244,7 @@ int compress_parameter_parallel(const char *file1,
float x = *(emb_table + k * emb_size + e);
int val = round((x - xmin) / scale);
val = std::max(0, val);
- val = std::min((int)pow2bits - 1, val);
+      val = std::min(static_cast<int>(pow2bits) - 1, val);
*(tensor_temp + 2 * sizeof(float) + e) = val;
}
result[k] = tensor_temp;
@@ -262,7 +264,8 @@ int compress_parameter_parallel(const char *file1,
}
SeqFileWriter seq_file_writer(file2);
for (int64_t i = 0; i < dict_size; i++) {
- seq_file_writer.write((char *)&i, sizeof(i), result[i], per_line_size);
+ seq_file_writer.write(
+ std::to_string(i).c_str(), sizeof(i), result[i], per_line_size);
}
return 0;
}
diff --git a/doc/WINDOWS_TUTORIAL.md b/doc/WINDOWS_TUTORIAL.md
new file mode 100644
index 0000000000000000000000000000000000000000..8d197df3e720495a2e93d21b02c2340126bb2813
--- /dev/null
+++ b/doc/WINDOWS_TUTORIAL.md
@@ -0,0 +1,126 @@
+## Paddle Serving for Windows Users
+
+(English|[简体中文](./WINDOWS_TUTORIAL_CN.md))
+
+### Summary
+
+This document guides users how to build Paddle Serving service on the Windows platform. Due to the limited support of third-party libraries, the Windows platform currently only supports the use of web services to build local predictor prediction services. If you want to experience all the services, you need to use Docker for Windows to simulate the operating environment of Linux.
+
+### Running Paddle Serving on Native Windows System
+
+**Configure Python environment variables to PATH**: First, you need to add the directory where the Python executable program is located to the PATH. Usually in **System Properties/My Computer Properties**-**Advanced**-**Environment Variables**, click Path and add the path at the beginning. For example, `C:\Users\$USER\AppData\Local\Programs\Python\Python36`, and finally click **OK** continuously. If you enter python on Powershell, you can enter the python interactive interface, indicating that the environment variable configuration is successful.
+
+**Install wget**: Because all the downloads in the tutorial and the built-in model download function in `paddle_serving_app` all use the wget tool, download the binary package at the [link](http://gnuwin32.sourceforge.net/packages/wget.htm), unzip and copy it to `C:\Windows\System32`, if there is a security prompt, you need to pass it.
+
+**Install Git**: For details, see [Git official website](https://git-scm.com/downloads)
+
+**Install the necessary C++ library (optional)**: Some users may encounter the problem that the dll cannot be linked during the `import paddle` stage. It is recommended to [Install Visual Studio Community Edition](https://visualstudio.microsoft.com/), and install the relevant components of C++.
+
+**Install Paddle and Serving**: In Powershell, execute
+
+```
+python -m pip install -U paddle_serving_server paddle_serving_client paddle_serving_app paddlepaddle
+```
+
+for GPU users,
+
+```
+python -m pip install -U paddle_serving_server_gpu paddle_serving_client paddle_serving_app paddlepaddle-gpu
+```
+
+**Git clone Serving Project:**
+
+```
+git clone https://github.com/paddlepaddle/Serving
+```
+
+**Run OCR example**:
+
+```
+cd Serving/python/examples/ocr
+python -m paddle_serving_app.package --get_model ocr_rec
+tar -xzvf ocr_rec.tar.gz
+python -m paddle_serving_app.package --get_model ocr_det
+tar -xzvf ocr_det.tar.gz
+python ocr_debugger_server.py &
+python ocr_web_client.py
+```
+
+### Create a new Paddle Serving Web Service on Windows
+
+Currently Windows supports the Local Predictor of the Web Service framework. The server code framework is as follows
+
+```
+# filename:your_webservice.py
+from paddle_serving_server.web_service import WebService
+# If it is the GPU version, please use from paddle_serving_server_gpu.web_service import WebService
+class YourWebService(WebService):
+ def preprocess(self, feed=[], fetch=[]):
+ #Implement pre-processing here
+ #feed_dict is key: var names, value: numpy array input
+ #fetch_names is a list of fetch variable names
+        #The meaning of is_batch is whether the numpy array in the value of feed_dict contains the batch dimension
+ return feed_dict, fetch_names, is_batch
+ def postprocess(self, feed={}, fetch=[], fetch_map=None):
+ #fetch map is the returned dictionary after prediction, the key is the fetch names given when the process returns, and the value is the var specific value corresponding to the fetch names
+ #After processing here, the result needs to be converted into a dictionary again, and the type of values should be a list, so that it can be serialized in JSON to facilitate web return
+ return response
+
+your_service = YourWebService(name="XXX")
+your_service.load_model_config("your_model_path")
+your_service.prepare_server(workdir="workdir", port=9292)
+# If you are a GPU user, you can refer to the python example under python/examples/ocr
+your_service.run_debugger_service()
+# Windows platform cannot use run_rpc_service() interface
+your_service.run_web_service()
+```
+
+Client code example
+
+```
+# filename:your_client.py
+import requests
+import json
+import base64
+import os, sys
+import time
+import cv2 # If you need to upload pictures
+# Used for image reading, the principle is to use base64 encoding file content
+def cv2_to_base64(image):
+ return base64.b64encode(image).decode(
+ 'utf8') #data.tostring()).decode('utf8')
+
+headers = {"Content-type": "application/json"}
+url = "http://127.0.0.1:9292/XXX/prediction" # XXX depends on the initial name parameter of the server YourService
+r = requests.post(url=url, headers=headers, data=json.dumps(data))
+print(r.json())
+```
+
+The user only needs to follow the above instructions and implement the relevant content in the corresponding function. For more information, please refer to [How to develop a new Web Service?](./NEW_WEB_SERVICE.md)
+
+Execute after development
+
+```
+python your_webservice.py &
+python your_client.py
+```
+
+Because the port needs to be occupied, there may be a security prompt during the startup process. Please click through and an IP address will be generated. It should be noted that when the Windows platform starts the service, the local IP address may not be 127.0.0.1. You need to confirm the IP address and then see how the Client should set the access IP.
+
+### Docker for Windows User Guide
+
+The above content is used for native Windows. If users want to experience complete functions, they need to use Docker tools to simulate the operating environment of Linux.
+
+Please refer to [Docker Desktop](https://www.docker.com/products/docker-desktop) to install Docker
+
+After installation, start the docker linux engine and download the relevant image. In the Serving directory
+
+```
+docker pull hub.baidubce.com/paddlepaddle/serving:latest-devel
+# There is no expose port here, users can set -p to perform port mapping as needed
+docker run --rm -dit --name serving_devel -v $PWD:/Serving hub.baidubce.com/paddlepaddle/serving:latest-devel
+docker exec -it serving_devel bash
+cd /Serving
+```
+
+The rest of the operations are exactly the same as the Linux version.
diff --git a/doc/WINDOWS_TUTORIAL_CN.md b/doc/WINDOWS_TUTORIAL_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..dd1074bdba6a4e00572b010b28f80f3f424787e0
--- /dev/null
+++ b/doc/WINDOWS_TUTORIAL_CN.md
@@ -0,0 +1,126 @@
+## Windows平台使用Paddle Serving指导
+
+([English](./WINDOWS_TUTORIAL.md)|简体中文)
+
+### 综述
+
+本文档指导用户如何在Windows平台手把手搭建Paddle Serving服务。由于受限第三方库的支持,Windows平台目前只支持用web service的方式搭建local predictor预测服务。如果想要体验全部的服务,需要使用Docker for Windows,来模拟Linux的运行环境。
+
+### 原生Windows系统运行Paddle Serving
+
+**配置Python环境变量到PATH**:首先需要将Python的可执行程序所在目录加入到PATH当中。通常在**系统属性/我的电脑属性**-**高级**-**环境变量** ,点选Path,并在开头加上路径。例如`C:\Users\$USER\AppData\Local\Programs\Python\Python36`,最后连续点击**确定** 。在Powershell上如果输入python可以进入python交互界面,说明环境变量配置成功。
+
+**安装wget工具**:由于教程当中所有的下载,以及`paddle_serving_app`当中内嵌的模型下载功能,都是用到wget工具,在链接[下载wget](http://gnuwin32.sourceforge.net/packages/wget.htm),解压后复制到`C:\Windows\System32`下,如有安全提示需要通过。
+
+**安装Git工具**: 详情参见[Git官网](https://git-scm.com/downloads)
+
+**安装必要的C++库(可选)**:部分用户可能会在`import paddle`阶段遇见dll无法链接的问题,建议可以[安装Visual Studio社区版本](https://visualstudio.microsoft.com/) ,并且安装C++的相关组件。
+
+**安装Paddle和Serving**:在Powershell,执行
+
+```
+python -m pip install -U paddle_serving_server paddle_serving_client paddle_serving_app paddlepaddle
+```
+
+如果是GPU用户
+
+```
+python -m pip install -U paddle_serving_server_gpu paddle_serving_client paddle_serving_app paddlepaddle-gpu
+```
+
+**下载Serving库**:
+
+```
+git clone https://github.com/paddlepaddle/Serving
+```
+
+**运行OCR示例**:
+
+```
+cd Serving/python/examples/ocr
+python -m paddle_serving_app.package --get_model ocr_rec
+tar -xzvf ocr_rec.tar.gz
+python -m paddle_serving_app.package --get_model ocr_det
+tar -xzvf ocr_det.tar.gz
+python ocr_debugger_server.py &
+python ocr_web_client.py
+```
+
+### 创建新的Windows支持的Paddle Serving服务
+
+目前Windows支持Web Service框架的Local Predictor。服务端代码框架如下
+
+```
+# filename:your_webservice.py
+from paddle_serving_server.web_service import WebService
+# 如果是GPU版本,请使用 from paddle_serving_server_gpu.web_service import WebService
+class YourWebService(WebService):
+ def preprocess(self, feed=[], fetch=[]):
+ #在这里实现前处理
+ #feed_dict是 key: var names, value: numpy array input
+ #fetch_names 是fetch变量名列表
+ #is_batch的含义是feed_dict的value里的numpy array是否包含了batch维度
+ return feed_dict, fetch_names, is_batch
+ def postprocess(self, feed={}, fetch=[], fetch_map=None):
+ #fetch map是经过预测之后的返回字典,key是process返回时给定的fetch names,value是对应fetch names的var具体值
+ #在这里做处理之后,结果需重新转换成字典,并且values的类型应是列表list,这样可以JSON序列化方便web返回
+ return response
+
+your_service = YourWebService(name="XXX")
+your_service.load_model_config("your_model_path")
+your_service.prepare_server(workdir="workdir", port=9292)
+# 如果是GPU用户,可以参照python/examples/ocr下的python示例
+your_service.run_debugger_service()
+# Windows平台不可以使用 run_rpc_service()接口
+your_service.run_web_service()
+```
+
+客户端代码示例
+
+```
+# filename:your_client.py
+import requests
+import json
+import base64
+import os, sys
+import time
+import cv2 # 如果需要上传图片
+# 用于图片读取,原理是采用base64编码文件内容
+def cv2_to_base64(image):
+ return base64.b64encode(image).decode(
+ 'utf8') #data.tostring()).decode('utf8')
+
+headers = {"Content-type": "application/json"}
+url = "http://127.0.0.1:9292/XXX/prediction" # XXX取决于服务端YourService的初始化name参数
+r = requests.post(url=url, headers=headers, data=json.dumps(data))
+print(r.json())
+```
+
+用户只需要按照如上指示,在对应函数中实现相关内容即可。更多信息请参见[如何开发一个新的Web Service?](./NEW_WEB_SERVICE_CN.md)
+
+开发完成后执行
+
+```
+python your_webservice.py &
+python your_client.py
+```
+
+因为需要占用端口,因此启动过程可能会有安全提示,请点选通过,就会有IP地址生成。需要注意的是,Windows平台启动服务时,本地IP地址可能不是127.0.0.1,需要确认好IP地址再看Client应该如何设定访问IP。
+
+### Docker for Windows 使用指南
+
+以上内容用于原生的Windows,如果用户想要体验完整的功能,需要使用Docker工具,来模拟Linux系统。
+
+安装Docker请参考[Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+安装之后启动docker的linux engine,下载相关镜像。在Serving目录下
+
+```
+docker pull hub.baidubce.com/paddlepaddle/serving:latest-devel
+# 此处没有expose端口,用户可根据需要设置-p来进行端口映射
+docker run --rm -dit --name serving_devel -v $PWD:/Serving hub.baidubce.com/paddlepaddle/serving:latest-devel
+docker exec -it serving_devel bash
+cd /Serving
+```
+
+其余操作与Linux版本完全一致。
diff --git a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
index a4d8dda71a7977185106bb1552cb8f39ef6bc50e..5f54bf3ceb3808eeff7d9d87cb56e3549d9ec44f 100644
--- a/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
+++ b/paddle_inference/inferencer-fluid-cpu/include/fluid_cpu_engine.h
@@ -13,7 +13,6 @@
// limitations under the License.
#pragma once
-
#include
#include
#include