diff --git a/paddle_inference/paddle/include/paddle_engine.h b/paddle_inference/paddle/include/paddle_engine.h
index 41c7ed642c4ddeec8dff2ec8d6b6f7a4e71b6343..10bcdbfd7a075016649a3b986be2d777f3fbe3a8 100644
--- a/paddle_inference/paddle/include/paddle_engine.h
+++ b/paddle_inference/paddle/include/paddle_engine.h
@@ -108,9 +108,9 @@ class PaddleInferenceEngine : public PaddleEngineBase {
     if (engine_conf.has_encrypted_model() && engine_conf.encrypted_model()) {
       // decrypt model
       std::string model_buffer, params_buffer, key_buffer;
-      predictor::ReadBinaryFile(model_path + "encrypt_model", &model_buffer);
-      predictor::ReadBinaryFile(model_path + "encrypt_params", &params_buffer);
-      predictor::ReadBinaryFile(model_path + "key", &key_buffer);
+      predictor::ReadBinaryFile(model_path + "/encrypt_model", &model_buffer);
+      predictor::ReadBinaryFile(model_path + "/encrypt_params", &params_buffer);
+      predictor::ReadBinaryFile(model_path + "/key", &key_buffer);
 
       auto cipher = paddle::MakeCipher("");
       std::string real_model_buffer = cipher->Decrypt(model_buffer, key_buffer);
diff --git a/python/examples/encryption/README.md b/python/examples/encryption/README.md
index d8a04e29bf56439a24db7dadfdfe3ab5d9626e14..a08b8b84241fb699992d1a718f2bfbf986d8d180 100644
--- a/python/examples/encryption/README.md
+++ b/python/examples/encryption/README.md
@@ -31,6 +31,7 @@ dirname is the folder path where the model is located. If the parameter is discr
 
 The key is stored in the `key` file, and the encrypted model file and server-side configuration file are stored in the `encrypt_server` directory. client-side configuration file are stored in the `encrypt_client` directory.
 
+**Notice:** When encrypted prediction is used, the model configuration and parameter folders loaded by the server and the client should be `encrypt_server/` and `encrypt_client/` respectively.
 ## Start Encryption Service
 CPU Service
 ```
@@ -43,5 +44,5 @@ python -m paddle_serving_server.serve --model encrypt_server/ --port 9300 --use_
 
 ## Prediction
 ```
-python test_client.py uci_housing_client/serving_client_conf.prototxt
+python test_client.py encrypt_client/serving_client_conf.prototxt
 ```
diff --git a/python/examples/encryption/README_CN.md b/python/examples/encryption/README_CN.md
index bb853ff37f914a5e2cfe1c6bbb097d17eb99a29a..f950796ec14dadfd7bf6744d94aba4959c838e7f 100644
--- a/python/examples/encryption/README_CN.md
+++ b/python/examples/encryption/README_CN.md
@@ -31,6 +31,8 @@ def serving_encryption():
 
 密钥保存在`key`文件中，加密模型文件以及server端配置文件保存在`encrypt_server`目录下，client端配置文件保存在`encrypt_client`目录下。
 
+**注意：** 当使用加密预测时，服务端和客户端启动加载的模型配置和参数文件夹是encrypt_server/和encrypt_client/
+
 ## 启动加密预测服务
 CPU预测服务
 ```
@@ -43,5 +45,5 @@ python -m paddle_serving_server.serve --model encrypt_server/ --port 9300 --use_
 
 ## 预测
 ```
-python test_client.py uci_housing_client/serving_client_conf.prototxt
+python test_client.py encrypt_client/
 ```
diff --git a/python/paddle_serving_server/server.py b/python/paddle_serving_server/server.py
index bc268887741c2f28addef43d270210ed487f2242..c08ef838cda60ff147fae8ba1c3470c5f5b8f4d1 100755
--- a/python/paddle_serving_server/server.py
+++ b/python/paddle_serving_server/server.py
@@ -42,23 +42,29 @@ from concurrent import futures
 
 class Server(object):
     def __init__(self):
+        """
+        self.model_toolkit_conf:'list'=[]  # The quantity is equal to the InferOp quantity, Engine--OP
+        self.model_conf:'collections.OrderedDict()'  # Saves the serving_server_conf.prototxt content (feed and fetch information); a map for multi-model in a workflow
+        self.workflow_fn:'str'="workflow.prototxt"  # Only one for one Service/Workflow
+        self.resource_fn:'str'="resource.prototxt"  # Only one for one Service; model_toolkit_fn and general_model_config_fn are recorded in this file
+        self.infer_service_fn:'str'="infer_service.prototxt"  # Only one for one Service, Service--Workflow
+        self.model_toolkit_fn:'list'=[]  # ["general_infer_0/model_toolkit.prototxt"], the quantity is equal to the InferOp quantity, Engine--OP
+        self.general_model_config_fn:'list'=[]  # ["general_infer_0/general_model.prototxt"], the quantity is equal to the InferOp quantity, Feed and Fetch--OP
+        self.subdirectory:'list'=[]  # The quantity is equal to the InferOp quantity, and name = node.name = engine.name
+        self.model_config_paths:'collections.OrderedDict()'  # Saves the serving_server_conf.prototxt path (feed and fetch information); a map for multi-model in a workflow
+        """
         self.server_handle_ = None
         self.infer_service_conf = None
-        self.model_toolkit_conf = [
-        ]  #The quantity is equal to the InferOp quantity,Engine--OP
+        self.model_toolkit_conf = []
         self.resource_conf = None
         self.memory_optimization = False
         self.ir_optimization = False
-        # save the serving_server_conf.prototxt content (feed and fetch information) this is a map for multi-model in a workflow
         self.model_conf = collections.OrderedDict()
-        self.workflow_fn = "workflow.prototxt"  #only one for one Service,Workflow--Op
-        self.resource_fn = "resource.prototxt"  #only one for one Service,model_toolkit_fn and general_model_config_fn is recorded in this file
-        self.infer_service_fn = "infer_service.prototxt"  #only one for one Service,Service--Workflow
-        #["general_infer_0/model_toolkit.prototxt"]The quantity is equal to the InferOp quantity,Engine--OP
+        self.workflow_fn = "workflow.prototxt"
+        self.resource_fn = "resource.prototxt"
+        self.infer_service_fn = "infer_service.prototxt"
         self.model_toolkit_fn = []
-        #["general_infer_0/general_model.prototxt"]The quantity is equal to the InferOp quantity,Feed and Fetch --OP
         self.general_model_config_fn = []
-        #The quantity is equal to the InferOp quantity, and name = node.name = engine.name
         self.subdirectory = []
         self.cube_config_fn = "cube.conf"
         self.workdir = ""
@@ -78,7 +84,6 @@ class Server(object):
         self.use_trt = False
         self.use_lite = False
         self.use_xpu = False
-        # save the serving_server_conf.prototxt path (feed and fetch information) this is a map for multi-model in a workflow
         self.model_config_paths = collections.OrderedDict()
         self.product_name = None
         self.container_id = None
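
For context on the README hunks above, here is a minimal sketch of the encryption step that produces the `encrypt_server/`, `encrypt_client/` directories and the `key` file the notice refers to. It assumes `paddle_serving_client.io.inference_model_to_serving` accepts an `encryption` flag, as used by the example's `serving_encryption()`; the `dirname` value is illustrative, not taken from this diff.

```python
# Hedged sketch of the model-encryption step (cf. serving_encryption() in the
# README_CN hunk context). Assumes inference_model_to_serving supports
# encryption=True; the input model directory name is illustrative only.
from paddle_serving_client.io import inference_model_to_serving


def serving_encryption():
    inference_model_to_serving(
        dirname="./uci_housing_model",    # illustrative: folder holding the saved inference model
        serving_server="encrypt_server",  # encrypted model + server-side config (per README)
        serving_client="encrypt_client",  # client-side config (per README)
        encryption=True)                  # assumed flag; also emits the `key` file


if __name__ == "__main__":
    serving_encryption()
```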
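
Likewise, a hedged sketch of what the corrected prediction command (`python test_client.py encrypt_client/serving_client_conf.prototxt`) roughly drives on the client side, assuming the `paddle_serving_client` encryption API (`use_key` and the `encryption` connect flag) from the repository's `test_client.py`; the feed/fetch names and the sample input are illustrative for the uci_housing model.

```python
# Hedged sketch of an encrypted-prediction client. Assumes Client.use_key() and
# connect(..., encryption=True) as in the encryption example; feed name "x",
# fetch name "price", port 9300 and the zero-valued sample are illustrative.
import sys

from paddle_serving_client import Client

client = Client()
# Client config generated during encryption, e.g. encrypt_client/serving_client_conf.prototxt
client.load_client_config(sys.argv[1])
# Provide the key produced at encryption time so the server can decrypt the model.
client.use_key("./key")
client.connect(["127.0.0.1:9300"], encryption=True)

sample = [0.0] * 13  # uci_housing has 13 input features
fetch_map = client.predict(feed={"x": sample}, fetch=["price"])
print(fetch_map)
```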