Commit 7a08cdaf authored by MRXLT

add encryption demo

Parent aacd85c4
@@ -552,8 +552,8 @@ class FluidCpuAnalysisEncryptCore : public FluidFamilyCore {
     }
     std::string model_buffer, params_buffer, key_buffer;
-    ReadBinaryFile(data_path + "encry_model", &model_buffer);
-    ReadBinaryFile(data_path + "encry_params", &params_buffer);
+    ReadBinaryFile(data_path + "encrypt_model", &model_buffer);
+    ReadBinaryFile(data_path + "encrypt_params", &params_buffer);
     ReadBinaryFile(data_path + "key", &key_buffer);
     VLOG(2) << "prepare for encryption model";
@@ -557,8 +557,8 @@ class FluidGpuAnalysisEncryptCore : public FluidFamilyCore {
     }
     std::string model_buffer, params_buffer, key_buffer;
-    ReadBinaryFile(data_path + "encry_model", &model_buffer);
-    ReadBinaryFile(data_path + "encry_params", &params_buffer);
+    ReadBinaryFile(data_path + "encrypt_model", &model_buffer);
+    ReadBinaryFile(data_path + "encrypt_params", &params_buffer);
     ReadBinaryFile(data_path + "key", &key_buffer);
     VLOG(2) << "prepare for encryption model";
# Encryption Model Prediction
([Simplified Chinese](README_CN.md)|English)
## Get Origin Model
This example uses the model file from the fit_a_line example as the original model.
```
sh get_data.sh
```
## Encrypt Model
```
python encrypt.py
```
The key is stored in the `key` file; the encrypted model files and the server-side configuration file are stored in the `encrypt_server` directory, and the client-side configuration file is stored in the `encrypt_client` directory.
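After `encrypt.py` runs, the working directory should look roughly like this (a sketch: `key`, `encrypt_model`, and `encrypt_params` are the names used by this demo's code, while `serving_server_conf.prototxt` / `serving_client_conf.prototxt` are the usual Paddle Serving config names and are assumed here):
```
key                              # AES key consumed by client and server
encrypt_server/
    encrypt_model                # encrypted model program
    encrypt_params               # encrypted parameters
    serving_server_conf.prototxt # server-side config (assumed name)
encrypt_client/
    serving_client_conf.prototxt # client-side config (assumed name)
```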
## Start Encryption Service
CPU Service
```
python -m paddle_serving_server.serve --model encrypt_server/ --port 9300 --use_encryption_model
```
GPU Service
```
python -m paddle_serving_server_gpu.serve --model encrypt_server/ --port 9300 --use_encryption_model --gpu_ids 0
```
## Prediction
```
python test_client.py uci_housing_client/serving_client_conf.prototxt
```
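Note that the client must hold the same `key` file produced by `encrypt.py`: as `test_client.py` below shows, it calls `client.use_key("./key")` before connecting with `encryption=True`, so the server can decrypt the model before serving.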
# Encrypted Model Prediction
(Simplified Chinese|[English](README.md))
## Get the Plaintext Model
This example uses the model file from the fit_a_line example as the plaintext model.
```
sh get_data.sh
```
## Encrypt the Model
```
python encrypt.py
```
The key is saved in the `key` file; the encrypted model files and the server-side configuration file are saved in the `encrypt_server` directory, and the client-side configuration file is saved in the `encrypt_client` directory.
## Start the Encrypted Inference Service
CPU inference service
```
python -m paddle_serving_server.serve --model encrypt_server/ --port 9300 --use_encryption_model
```
GPU inference service
```
python -m paddle_serving_server_gpu.serve --model encrypt_server/ --port 9300 --use_encryption_model --gpu_ids 0
```
## Prediction
```
python test_client.py uci_housing_client/serving_client_conf.prototxt
```
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_client.io import inference_model_to_serving


def serving_encryption():
    # Convert the plaintext uci_housing model into an encrypted serving
    # model: the AES key is written to ./key, the encrypted model and the
    # server-side config to ./encrypt_server, and the client-side config
    # to ./encrypt_client.
    inference_model_to_serving(
        dirname="./uci_housing_model",
        serving_server="encrypt_server",
        serving_client="encrypt_client",
        encryption=True)


if __name__ == "__main__":
    serving_encryption()
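For comparison, the same helper exports a plaintext serving model when `encryption` is left at its default. A minimal sketch based on the `inference_model_to_serving` signature in the `io.py` diff below (directory names are the defaults from that signature):
```
from paddle_serving_client.io import inference_model_to_serving

# Plain (unencrypted) export: writes serving_server/ and serving_client/
# with the model stored in the clear; returns the feed/fetch variable names.
feed_names, fetch_names = inference_model_to_serving(
    dirname="./uci_housing_model",
    serving_server="serving_server",
    serving_client="serving_client")
```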
wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
tar -xzf uci_housing.tar.gz
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import sys

import paddle
from paddle_serving_client import Client

client = Client()
client.load_client_config(sys.argv[1])
# Load the AES key generated by encrypt.py so the encrypted model can be
# decrypted for serving.
client.use_key("./key")
client.connect(["127.0.0.1:9300"], encryption=True)

test_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.uci_housing.test(), buf_size=500),
    batch_size=1)

for data in test_reader():
    fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
    # Predicted price followed by the ground-truth label.
    print("{} {}".format(fetch_map["price"][0], data[0][1][0]))
@@ -21,6 +21,9 @@ from paddle.fluid.framework import Program
 from paddle.fluid import CPUPlace
 from paddle.fluid.io import save_inference_model
 import paddle.fluid as fluid
+from paddle.fluid.core import CipherUtils
+from paddle.fluid.core import CipherFactory
+from paddle.fluid.core import Cipher
 from ..proto import general_model_config_pb2 as model_conf
 import os

@@ -29,7 +32,10 @@ def save_model(server_model_folder,
                client_config_folder,
                feed_var_dict,
                fetch_var_dict,
-               main_program=None):
+               main_program=None,
+               encryption=False,
+               key_len=128,
+               encrypt_conf=None):
     executor = Executor(place=CPUPlace())

     feed_var_names = [feed_var_dict[x].name for x in feed_var_dict]

@@ -38,14 +44,29 @@
     for key in sorted(fetch_var_dict.keys()):
         target_vars.append(fetch_var_dict[key])
         target_var_names.append(key)

-    save_inference_model(
-        server_model_folder,
-        feed_var_names,
-        target_vars,
-        executor,
-        main_program=main_program)
+    if not encryption:
+        save_inference_model(
+            server_model_folder,
+            feed_var_names,
+            target_vars,
+            executor,
+            main_program=main_program)
+    else:
+        if encrypt_conf is None:
+            aes_cipher = CipherFactory.create_cipher()
+        else:
+            # TODO: support more encryption algorithms
+            pass
+        key = CipherUtils.gen_key_to_file(key_len, "key")
+        params = fluid.io.save_persistables(
+            executor=executor, dirname=None, main_program=main_program)
+        model = main_program.desc.serialize_to_string()
+        if not os.path.exists(server_model_folder):
+            os.makedirs(server_model_folder)
+        os.chdir(server_model_folder)
+        aes_cipher.encrypt_to_file(params, key, "encrypt_params")
+        aes_cipher.encrypt_to_file(model, key, "encrypt_model")
+        os.chdir("..")

     config = model_conf.GeneralModelConfig()

     for key in feed_var_dict:

@@ -111,7 +132,10 @@ def inference_model_to_serving(dirname,
                                serving_server="serving_server",
                                serving_client="serving_client",
                                model_filename=None,
-                               params_filename=None):
+                               params_filename=None,
+                               encryption=False,
+                               key_len=128,
+                               encrypt_conf=None):
     place = fluid.CPUPlace()
     exe = fluid.Executor(place)
     inference_program, feed_target_names, fetch_targets = \

@@ -122,7 +146,7 @@
     }
     fetch_dict = {x.name: x for x in fetch_targets}
     save_model(serving_server, serving_client, feed_dict, fetch_dict,
-               inference_program)
+               inference_program, encryption, key_len, encrypt_conf)
     feed_names = feed_dict.keys()
     fetch_names = fetch_dict.keys()
     return feed_names, fetch_names
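To sanity-check the cipher calls introduced in this diff, here is a minimal round-trip sketch. It assumes the `paddle.fluid.core` bindings behave as used above; `decrypt_from_file` is assumed to be the read-side counterpart of `encrypt_to_file` and should be verified against your Paddle build:
```
from paddle.fluid.core import CipherFactory, CipherUtils

cipher = CipherFactory.create_cipher()         # default AES cipher, as in save_model
key = CipherUtils.gen_key_to_file(128, "key")  # 128-bit key, also written to ./key
cipher.encrypt_to_file("hello", key, "demo.encrypted")
# Assumed counterpart of encrypt_to_file; may return str or bytes
# depending on the binding.
plain = cipher.decrypt_from_file(key, "demo.encrypted")
print(plain)
```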