Unverified commit c2928447, authored by Zhang Jun, committed by GitHub

Merge branch 'develop' into low-precision

......@@ -108,9 +108,9 @@ class PaddleInferenceEngine : public PaddleEngineBase {
if (engine_conf.has_encrypted_model() && engine_conf.encrypted_model()) {
// decrypt model
std::string model_buffer, params_buffer, key_buffer;
predictor::ReadBinaryFile(model_path + "encrypt_model", &model_buffer);
predictor::ReadBinaryFile(model_path + "encrypt_params", &params_buffer);
predictor::ReadBinaryFile(model_path + "key", &key_buffer);
predictor::ReadBinaryFile(model_path + "/encrypt_model", &model_buffer);
predictor::ReadBinaryFile(model_path + "/encrypt_params", &params_buffer);
predictor::ReadBinaryFile(model_path + "/key", &key_buffer);
auto cipher = paddle::MakeCipher("");
std::string real_model_buffer = cipher->Decrypt(model_buffer, key_buffer);
......
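The change above fixes a path-join bug: `model_path` is a directory, and concatenating the file name without a separator produced a non-existent sibling path instead of a file inside the directory. A minimal sketch of the effect (written in Python for brevity; the engine code itself is C++, but string concatenation behaves the same way):

```python
model_path = "encrypt_model_dir"  # hypothetical directory handed to the engine

# Before the fix: no separator, so the file name is glued onto the directory name.
print(model_path + "encrypt_model")   # encrypt_model_direncrypt_model  (wrong path)

# After the fix: the explicit "/" addresses the file inside the directory.
print(model_path + "/encrypt_model")  # encrypt_model_dir/encrypt_model (intended path)
```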
......@@ -31,6 +31,7 @@ dirname is the folder path where the model is located. If the parameter is discr
The key is stored in the `key` file; the encrypted model files and the server-side configuration files are stored in the `encrypt_server` directory, and the client-side configuration files are stored in the `encrypt_client` directory.
**Notice:** When encrypted prediction is used, the model configuration and parameter folders loaded by the server and the client must be `encrypt_server/` and `encrypt_client/` respectively.
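For context, the `key`, `encrypt_server/`, and `encrypt_client/` artifacts referenced above are produced by an encryption script such as the `serving_encryption()` helper that appears in a later hunk. Here is a minimal sketch of that step; the `encryption` flag and argument names follow the example script's pattern and should be treated as assumptions rather than verified API:

```python
# Sketch of generating the encrypted model artifacts (assumed signature).
from paddle_serving_client.io import inference_model_to_serving

def serving_encryption():
    inference_model_to_serving(
        dirname="./uci_housing_model",    # hypothetical source model directory
        serving_server="encrypt_server",  # encrypted model + server-side config
        serving_client="encrypt_client",  # client-side config
        encryption=True)                  # assumed: also writes the key to ./key

if __name__ == "__main__":
    serving_encryption()
```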
## Start Encryption Service
CPU Service
```
......@@ -43,5 +44,5 @@ python -m paddle_serving_server.serve --model encrypt_server/ --port 9300 --use_
## Prediction
```
-python test_client.py uci_housing_client/serving_client_conf.prototxt
+python test_client.py encrypt_client/serving_client_conf.prototxt
```
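For reference, a minimal client sketch matching the command above; the `use_key` call and the `encryption` connect flag follow the encryption demo's pattern and should be treated as assumptions, and the feed/fetch names are hypothetical values for the uci_housing demo model:

```python
import sys
from paddle_serving_client import Client

client = Client()
client.load_client_config(sys.argv[1])  # e.g. encrypt_client/serving_client_conf.prototxt
client.use_key("./key")                 # assumed API: pass the key file to the client
client.connect(["127.0.0.1:9300"], encryption=True)  # assumed encryption flag

# Hypothetical feed/fetch names for the uci_housing demo model.
fetch_map = client.predict(feed={"x": [0.0] * 13}, fetch=["price"])
print(fetch_map)
```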
......@@ -31,6 +31,8 @@ def serving_encryption():
The key is stored in the `key` file; the encrypted model files and the server-side configuration files are stored in the `encrypt_server` directory, and the client-side configuration files are stored in the `encrypt_client` directory.
**Notice:** When encrypted prediction is used, the model configuration and parameter folders loaded by the server and the client are `encrypt_server/` and `encrypt_client/`.
## Start the Encrypted Prediction Service
CPU prediction service
```
......@@ -43,5 +45,5 @@ python -m paddle_serving_server.serve --model encrypt_server/ --port 9300 --use_
## Prediction
```
-python test_client.py uci_housing_client/serving_client_conf.prototxt
+python test_client.py encrypt_client/
```
......@@ -42,23 +42,29 @@ from concurrent import futures
class Server(object):
def __init__(self):
"""
self.model_toolkit_conf:'list'=[] # The quantity of self.model_toolkit_conf is equal to the InferOp quantity/Engine--OP
self.model_conf:'collections.OrderedDict()' # Save the serving_server_conf.prototxt content (feed and fetch information) this is a map for multi-model in a workflow
self.workflow_fn:'str'="workflow.prototxt" # Only one for one Service/Workflow
self.resource_fn:'str'="resource.prototxt" # Only one for one Service,model_toolkit_fn and general_model_config_fn is recorded in this file
self.infer_service_fn:'str'="infer_service.prototxt" # Only one for one Service,Service--Workflow
self.model_toolkit_fn:'list'=[] # ["general_infer_0/model_toolkit.prototxt"]The quantity is equal to the InferOp quantity,Engine--OP
self.general_model_config_fn:'list'=[] # ["general_infer_0/general_model.prototxt"]The quantity is equal to the InferOp quantity,Feed and Fetch --OP
self.subdirectory:'list'=[] # The quantity is equal to the InferOp quantity, and name = node.name = engine.name
self.model_config_paths:'collections.OrderedDict()' # Save the serving_server_conf.prototxt path (feed and fetch information) this is a map for multi-model in a workflow
"""
self.server_handle_ = None
self.infer_service_conf = None
-self.model_toolkit_conf = [
-] #The quantity is equal to the InferOp quantity,Engine--OP
+self.model_toolkit_conf = []
self.resource_conf = None
self.memory_optimization = False
self.ir_optimization = False
-# save the serving_server_conf.prototxt content (feed and fetch information) this is a map for multi-model in a workflow
self.model_conf = collections.OrderedDict()
self.workflow_fn = "workflow.prototxt" #only one for one Service,Workflow--Op
self.resource_fn = "resource.prototxt" #only one for one Service,model_toolkit_fn and general_model_config_fn is recorded in this file
self.infer_service_fn = "infer_service.prototxt" #only one for one Service,Service--Workflow
#["general_infer_0/model_toolkit.prototxt"]The quantity is equal to the InferOp quantity,Engine--OP
self.workflow_fn = "workflow.prototxt"
self.resource_fn = "resource.prototxt"
self.infer_service_fn = "infer_service.prototxt"
self.model_toolkit_fn = []
#["general_infer_0/general_model.prototxt"]The quantity is equal to the InferOp quantity,Feed and Fetch --OP
self.general_model_config_fn = []
-#The quantity is equal to the InferOp quantity, and name = node.name = engine.name
self.subdirectory = []
self.cube_config_fn = "cube.conf"
self.workdir = ""
......@@ -78,7 +84,6 @@ class Server(object):
self.use_trt = False
self.use_lite = False
self.use_xpu = False
-# save the serving_server_conf.prototxt path (feed and fetch information) this is a map for multi-model in a workflow
self.model_config_paths = collections.OrderedDict()
self.product_name = None
self.container_id = None
......
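To make the docstring's file mapping concrete, here is a hypothetical workdir layout for a workflow with a single InferOp named general_infer_0, derived from the attribute descriptions above:

```
workdir/
├── infer_service.prototxt        # one per Service (Service--Workflow)
├── workflow.prototxt             # one per Service/Workflow
├── resource.prototxt             # records model_toolkit_fn and general_model_config_fn
└── general_infer_0/              # one subdirectory per InferOp; name = node.name = engine.name
    ├── model_toolkit.prototxt    # engine config (Engine--OP)
    └── general_model.prototxt    # feed/fetch config (Feed and Fetch--OP)
```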