Commit f1eefb85 authored by Dong Daxiang, committed by GitHub

Merge pull request #194 from guru4elephant/reorg_dir

Reorg dir
@@ -65,8 +65,8 @@ endif()
 if (NOT CLIENT_ONLY)
 include(external/jsoncpp)
 include(external/rocksdb)
-include(external/gtest)
 endif()
+#include(external/gtest)
 include(external/snappy)
 include(external/leveldb)
...
@@ -28,7 +28,7 @@ ExternalProject_Add(
     GIT_TAG master
     UPDATE_COMMAND ""
     CONFIGURE_COMMAND ""
-    BUILD_COMMAND CXXFLAGS=-fPIC && mkdir build && cd build && cmake .. && make -j ${NUM_OF_PROCESSOR} gtest
+    BUILD_COMMAND CXXFLAGS=-fPIC && mkdir -p build && cd build && cmake .. && make -j ${NUM_OF_PROCESSOR} gtest
     INSTALL_COMMAND mkdir -p ${GTEST_INSTALL_DIR}/lib/
     && cp ${GTEST_SOURCES_DIR}/src/extern_gtest/build/lib/libgtest.a ${GTEST_LIBRARIES}
     && cp -r ${GTEST_SOURCES_DIR}/src/extern_gtest/googletest/include ${GTEST_INSTALL_DIR}/
@@ -41,4 +41,3 @@ ADD_LIBRARY(gtest STATIC IMPORTED GLOBAL)
 SET_PROPERTY(TARGET gtest PROPERTY IMPORTED_LOCATION ${GTEST_LIBRARIES})
 LIST(APPEND external_project_dependencies gtest)
@@ -67,10 +67,10 @@ endif()
 target_link_libraries(cube brpc ${DYNAMIC_LIB} -lpthread -ldl -lz)
-add_executable(cube_test ${SRC_LIST} test/cube_test.cpp
-               ${PROTO_SRC} ${PROTO_HEADER})
-target_link_libraries(cube_test brpc ${DYNAMIC_LIB} gtest -lpthread -ldl -lz)
+#add_executable(cube_test ${SRC_LIST} test/cube_test.cpp
+#               ${PROTO_SRC} ${PROTO_HEADER})
+#target_link_libraries(cube_test brpc ${DYNAMIC_LIB} gtest -lpthread -ldl -lz)
 # install
 install(TARGETS cube
...
 include_directories(SYSTEM ${CMAKE_CURRENT_LIST_DIR}/include)
-set(SRC_LIST ${CMAKE_CURRENT_LIST_DIR}/src/test_rocksdb.cpp
-    ${CMAKE_CURRENT_LIST_DIR}/src/rockskvdb_impl.cpp
+set(SRC_LIST ${CMAKE_CURRENT_LIST_DIR}/src/rockskvdb_impl.cpp
     ${CMAKE_CURRENT_LIST_DIR}/src/param_dict_mgr_impl.cpp
     ${CMAKE_CURRENT_LIST_DIR}/src/mock_param_dict_impl.cpp
-    ${CMAKE_CURRENT_LIST_DIR}/src/paddle_rocksdb.cpp
-    ${CMAKE_CURRENT_LIST_DIR}/src/gtest_kvdb.cpp)
+    ${CMAKE_CURRENT_LIST_DIR}/src/paddle_rocksdb.cpp)
 add_library(kvdb ${SRC_LIST})
 add_dependencies(kvdb rocksdb)
 install(TARGETS kvdb ARCHIVE DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/lib/)
-add_executable(kvdb_test ${SRC_LIST})
-add_dependencies(kvdb_test rocksdb)
-target_link_libraries(kvdb_test rocksdb bz2 snappy zlib gtest)
-#target_include_directories(kvdb_test PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include/kvdb)
-set(SRC_LIST2 ${CMAKE_CURRENT_LIST_DIR}/src/gtest_db_thread.cpp
-    ${CMAKE_CURRENT_LIST_DIR}/src/rockskvdb_impl.cpp
+set(SRC_LIST2 ${CMAKE_CURRENT_LIST_DIR}/src/rockskvdb_impl.cpp
     ${CMAKE_CURRENT_LIST_DIR}/src/param_dict_mgr_impl.cpp
     ${CMAKE_CURRENT_LIST_DIR}/src/mock_param_dict_impl.cpp
     ${CMAKE_CURRENT_LIST_DIR}/src/paddle_rocksdb.cpp)
-add_executable(db_thread ${SRC_LIST2})
-add_dependencies(db_thread rocksdb)
-target_link_libraries(db_thread rocksdb bz2 snappy zlib gtest)
-#target_include_directories(db_thread PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include/kvdb)
-set(SRC_LIST3 ${CMAKE_CURRENT_LIST_DIR}/src/gtest_db_func.cpp
-    ${CMAKE_CURRENT_LIST_DIR}/src/rockskvdb_impl.cpp
-    ${CMAKE_CURRENT_LIST_DIR}/src/param_dict_mgr_impl.cpp
-    ${CMAKE_CURRENT_LIST_DIR}/src/mock_param_dict_impl.cpp
-    ${CMAKE_CURRENT_LIST_DIR}/src/paddle_rocksdb.cpp)
-add_executable(db_func ${SRC_LIST3})
-add_dependencies(db_func rocksdb)
-target_link_libraries(db_func rocksdb bz2 snappy zlib gtest)
-#target_include_directories(db_func PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include/kvdb)
-install(TARGETS kvdb_test
-        RUNTIME DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/demo/kvdb_test)
-install(TARGETS db_thread
-        RUNTIME DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/demo/db_thread)
-install(TARGETS db_func
-        RUNTIME DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/demo/db_func)
 file(GLOB kvdb_headers "${CMAKE_CURRENT_LIST_DIR}/include/kvdb/*.h")
 install(FILES ${kvdb_headers} DESTINATION
         ${PADDLE_SERVING_INSTALL_DIR}/include/kvdb/)
# Paddle Serving
Paddle Serving is PaddlePaddle's online inference service framework, which helps developers easily provide remote prediction services that call deep learning models from mobile clients and servers. Paddle Serving currently focuses on models trained with PaddlePaddle and can be used together with the Paddle training framework to deploy inference services quickly. It is designed around common industrial deployment scenarios for deep learning models: typical features include multi-model management, model hot loading, high-concurrency and low-latency responses based on [Baidu-rpc](https://github.com/apache/incubator-brpc), and online model A/B testing. APIs that cooperate with the Paddle training framework let users move seamlessly between training and remote deployment, improving the efficiency of bringing deep learning models into production.
------------
## Quick Start Guide
The current develop branch of Paddle Serving provides a lightweight Python API for fast inference and connects directly with Paddle training. Using the classic Boston house-price prediction task as an example, this guide walks through training a model on a single machine and deploying it with Paddle Serving.
#### Installation
``` shell
pip install paddle-serving-client
pip install paddle-serving-server
```
#### Training Script
``` python
import sys
import paddle
import paddle.fluid as fluid

train_reader = paddle.batch(paddle.reader.shuffle(
    paddle.dataset.uci_housing.train(), buf_size=500), batch_size=16)
test_reader = paddle.batch(paddle.reader.shuffle(
    paddle.dataset.uci_housing.test(), buf_size=500), batch_size=16)

x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_loss = fluid.layers.mean(cost)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01)
sgd_optimizer.minimize(avg_loss)

place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

import paddle_serving_client.io as serving_io

for pass_id in range(30):
    for data_train in train_reader():
        avg_loss_value, = exe.run(
            fluid.default_main_program(),
            feed=feeder.feed(data_train),
            fetch_list=[avg_loss])

serving_io.save_model(
    "serving_server_model", "serving_client_conf",
    {"x": x}, {"y": y_predict}, fluid.default_main_program())
```
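After training finishes, a quick sanity check such as the following (a minimal sketch, not part of the official example) confirms that `serving_io.save_model` produced the two directories consumed later by the server (`load_model_config`) and the client (`load_client_config`):
``` python
import os

# save_model above writes the server-side model files and the
# client-side configuration into these two directories.
for d in ["serving_server_model", "serving_client_conf"]:
    print("{}: {}".format(d, "found" if os.path.isdir(d) else "missing"))
```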
#### Server-Side Code
``` python
import sys
from paddle_serving.serving_server import OpMaker
from paddle_serving.serving_server import OpSeqMaker
from paddle_serving.serving_server import Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(general_infer_op)
server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.load_model_config(sys.argv[1])
server.prepare_server(workdir="work_dir1", port=9393, device="cpu")
server.run_server()
```
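A usage note: the port passed to `prepare_server` (9393 above) is the port the service listens on, so clients must connect to that same port; `run_server()` then starts the service.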
#### Starting the Server
``` shell
python test_server.py serving_server_model
```
#### Client-Side Prediction
``` python
from paddle_serving_client import Client
import paddle
import sys

client = Client()
client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9393"])

test_reader = paddle.batch(paddle.reader.shuffle(
    paddle.dataset.uci_housing.test(), buf_size=500), batch_size=1)

for data in test_reader():
    fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["y"])
    print("{} {}".format(fetch_map["y"][0], data[0][1][0]))
```
### Documentation
[Design Doc](doc/DESIGN.md)
[FAQ](doc/FAQ.md)
### Guide for Advanced Developers
[Compilation Guide](doc/INSTALL.md)
## Contributing
If you want to contribute to Paddle Serving, please see the [Contribution Guidelines](doc/CONTRIBUTE.md).
# Paddle Serving
Paddle Serving is the online inference service framework of [Paddle](https://github.com/PaddlePaddle/Paddle) that helps developers easily deploy a deep learning model service on the server side and send requests from mobile devices, edge devices, and data centers. Currently, Paddle Serving mainly supports deep learning models trained with Paddle, although it can easily be extended to serve models from other deep learning frameworks. Paddle Serving is designed around industrial practice: for example, it supports multi-model management for online services, double-buffered model loading, and online model A/B testing. The highly concurrent [Baidu-rpc](https://github.com/apache/incubator-brpc) library, also born from industrial practice, is used for the underlying communication. Paddle Serving provides a user-friendly API that integrates with Paddle training code seamlessly, so users can finish model training and model serving in an end-to-end fashion.
## Quick Start
Paddle Serving provides a lightweight Python API for model inference that integrates with the training process seamlessly. Here is a Boston house-price prediction example to get you started quickly.
### Installation
```shell
pip install paddle-serving-client
pip install paddle-serving-server
```
### Training Script
``` python
import sys
import paddle
import paddle.fluid as fluid

train_reader = paddle.batch(paddle.reader.shuffle(
    paddle.dataset.uci_housing.train(), buf_size=500), batch_size=16)
test_reader = paddle.batch(paddle.reader.shuffle(
    paddle.dataset.uci_housing.test(), buf_size=500), batch_size=16)

x = fluid.data(name='x', shape=[None, 13], dtype='float32')
y = fluid.data(name='y', shape=[None, 1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_loss = fluid.layers.mean(cost)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01)
sgd_optimizer.minimize(avg_loss)

place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

import paddle_serving_client.io as serving_io

for pass_id in range(30):
    for data_train in train_reader():
        avg_loss_value, = exe.run(
            fluid.default_main_program(),
            feed=feeder.feed(data_train),
            fetch_list=[avg_loss])

serving_io.save_model(
    "serving_server_model", "serving_client_conf",
    {"x": x}, {"y": y_predict}, fluid.default_main_program())
```
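A note on the `save_model` call above: the two dictionaries map the feed and fetch names exposed to serving clients ("x" and "y" here) to the corresponding program variables, which is why the client script below feeds `{"x": ...}` and fetches `["y"]`, while the two directory arguments name the server-side model folder and the client-side configuration folder.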
### Server-Side Script
``` python
import sys
from paddle_serving.serving_server import OpMaker
from paddle_serving.serving_server import OpSeqMaker
from paddle_serving.serving_server import Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(general_infer_op)
server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.load_model_config(sys.argv[1])
server.prepare_server(workdir="work_dir1", port=9393, device="cpu")
server.run_server()
```
### Start Server
``` shell
python test_server.py serving_server_model
```
### Client-Side Script
``` python
from paddle_serving_client import Client
import paddle
import sys

client = Client()
client.load_client_config(sys.argv[1])
client.connect(["127.0.0.1:9393"])

test_reader = paddle.batch(paddle.reader.shuffle(
    paddle.dataset.uci_housing.test(), buf_size=500), batch_size=1)

for data in test_reader():
    fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["y"])
    print("{} {}".format(fetch_map["y"][0], data[0][1][0]))
```
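Building only on the client API shown above, a small extension (a sketch, not part of the official example) aggregates prediction error over the whole test set instead of printing each pair:
``` python
import math

# Accumulate squared error between predictions and labels over the test reader.
total_sq_err, n = 0.0, 0
for data in test_reader():
    fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["y"])
    pred = float(fetch_map["y"][0])   # predicted price
    label = float(data[0][1][0])      # ground-truth price
    total_sq_err += (pred - label) ** 2
    n += 1
print("RMSE over {} samples: {:.4f}".format(n, math.sqrt(total_sq_err / n)))
```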
### Documentation
[Design Doc(Chinese)](doc/DESIGN.md)
[FAQ(Chinese)](doc/FAQ.md)
### Advanced Features and Development
[Develop a serving application with C++(Chinese)](doc/CREATING.md)
[Compile from source code(Chinese)](doc/INSTALL.md)
## Contribution
If you want to contribute code to Paddle Serving, please refer to the [Contribution Guidelines](doc/CONTRIBUTE.md).