Unverified commit 8d796533, authored by TeslaZhao, committed by GitHub

Merge pull request #33 from PaddlePaddle/develop

Sync from PaddlePaddle/Serving repo
@@ -104,7 +104,22 @@ class PaddleInferenceEngine : public PaddleEngineBase {
Config config;
// todo, auto config(zhangjun)
if (engine_conf.has_combined_model()) {
if (engine_conf.has_encrypted_model() && engine_conf.encrypted_model()) {
// decrypt model
std::string model_buffer, params_buffer, key_buffer;
predictor::ReadBinaryFile(model_path + "encrypt_model", &model_buffer);
predictor::ReadBinaryFile(model_path + "encrypt_params", &params_buffer);
predictor::ReadBinaryFile(model_path + "key", &key_buffer);
auto cipher = paddle::MakeCipher("");
std::string real_model_buffer = cipher->Decrypt(model_buffer, key_buffer);
std::string real_params_buffer =
cipher->Decrypt(params_buffer, key_buffer);
config.SetModelBuffer(&real_model_buffer[0],
real_model_buffer.size(),
&real_params_buffer[0],
real_params_buffer.size());
} else if (engine_conf.has_combined_model()) {
if (!engine_conf.combined_model()) {
config.SetModel(model_path);
} else {
@@ -156,22 +171,6 @@ class PaddleInferenceEngine : public PaddleEngineBase {
config.EnableMemoryOptim();
}
if (engine_conf.has_encrypted_model() && engine_conf.encrypted_model()) {
// decrypt model
std::string model_buffer, params_buffer, key_buffer;
predictor::ReadBinaryFile(model_path + "encrypt_model", &model_buffer);
predictor::ReadBinaryFile(model_path + "encrypt_params", &params_buffer);
predictor::ReadBinaryFile(model_path + "key", &key_buffer);
auto cipher = paddle::MakeCipher("");
std::string real_model_buffer = cipher->Decrypt(model_buffer, key_buffer);
std::string real_params_buffer =
cipher->Decrypt(params_buffer, key_buffer);
config.SetModelBuffer(&real_model_buffer[0],
real_model_buffer.size(),
&real_params_buffer[0],
real_params_buffer.size());
}
predictor::AutoLock lock(predictor::GlobalCreateMutex::instance());
_predictor = CreatePredictor(config);
......
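This hunk moves the decryption branch ahead of the combined-model check, so when `encrypted_model` is set the engine reads `encrypt_model`, `encrypt_params`, and `key` from `model_path`, decrypts both buffers with `paddle::MakeCipher`, and hands them to `Config::SetModelBuffer` before any of the plain-model loading paths run; the decrypted model never touches disk. A minimal sketch for checking the on-disk layout this branch expects (the directory name is a placeholder; note the C++ code concatenates `model_path` and the file names directly, so `model_path` should end with a path separator):

```python
# Sanity check for the files the decryption branch reads as
# model_path + "encrypt_model" / "encrypt_params" / "key".
# "encrypt_server/" is a placeholder directory name.
import os

model_path = "encrypt_server/"
for name in ("encrypt_model", "encrypt_params", "key"):
    full = model_path + name
    print(f"{full}: {'ok' if os.path.isfile(full) else 'missing'}")
```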
@@ -107,7 +107,7 @@ ocr_service.prepare_server(workdir="workdir", port=9292)
ocr_service.init_det_debugger(det_model_config="ocr_det_model")
if sys.argv[1] == 'gpu':
ocr_service.set_gpus("2")
ocr_service.run_debugger_service()
ocr_service.run_debugger_service(gpu=True)
elif sys.argv[1] == 'cpu':
ocr_service.run_debugger_service()
ocr_service.run_web_service()
@@ -13,7 +13,10 @@ sh get_data.sh
## RPC service
### Start server
```shell
python test_server.py uci_housing_model/
```
You can also use the following command to start the RPC service
```shell
python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9393 --use_lite --use_xpu --ir_optim
```
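Once either server is running, a prediction request can be sent from the Python client. A minimal sketch, assuming the data package fetched by `get_data.sh` also provides a `uci_housing_client/` config and that the model feeds `x` and fetches `price` as in the fit-a-line example:

```python
import numpy as np
from paddle_serving_client import Client

client = Client()
client.load_client_config("uci_housing_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9393"])

# One normalized 13-feature housing sample (illustrative values only)
x = np.array([0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0501, -0.1583,
              -0.0663, 0.2623, 0.0700, 0.2258, -0.0250, -0.1360], dtype="float32")
fetch_map = client.predict(feed={"x": x}, fetch=["price"])
print(fetch_map)
```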
......
@@ -18,5 +18,5 @@ python -m paddle_serving_server.serve --model resnet_v2_50_imagenet_model --port
### Client Prediction
```
python resnet50_v2_client.py
python resnet50_client.py
```
@@ -18,5 +18,5 @@ python -m paddle_serving_server.serve --model resnet_v2_50_imagenet_model --port
### Client Prediction
```
python resnet50_v2_client.py
python resnet50_client.py
```
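The renamed script sends one image to the ResNet50_v2 server. A rough sketch of what such a client does, assuming the preprocessing ops from `paddle_serving_app.reader` and the `image`/`score` feed and fetch names used by the imagenet examples (all names here are assumptions, not a verified listing of `resnet50_client.py`):

```python
from paddle_serving_client import Client
from paddle_serving_app.reader import (Sequential, File2Image, Resize,
                                       CenterCrop, RGB2BGR, Transpose, Div,
                                       Normalize)

client = Client()
client.load_client_config("resnet_v2_50_imagenet_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9393"])

# Standard ImageNet-style preprocessing pipeline
seq = Sequential([
    File2Image(), Resize(256), CenterCrop(224), RGB2BGR(),
    Transpose((2, 0, 1)), Div(255),
    Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True)
])
img = seq("daisy.jpg")  # placeholder image path
fetch_map = client.predict(feed={"image": img}, fetch=["score"])
print(fetch_map["score"])
```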
@@ -266,8 +266,12 @@ class WebService(object):
def _launch_local_predictor(self, gpu):
from paddle_serving_app.local_predict import LocalPredictor
self.client = LocalPredictor()
self.client.load_model_config(
"{}".format(self.model_config), use_gpu=True, gpu_id=self.gpus[0])
if gpu:
self.client.load_model_config(
"{}".format(self.model_config), use_gpu=True, gpu_id=self.gpus[0])
else:
self.client.load_model_config(
"{}".format(self.model_config), use_gpu=False)
def run_web_service(self):
print("This API will be deprecated later. Please do not use it")
......
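After this change, `_launch_local_predictor` only pins a GPU when the caller passes `gpu=True`, so a CPU-only debugger service no longer needs `self.gpus` to be populated. A minimal sketch of the underlying `LocalPredictor` calls for both paths, assuming a local serving model directory and fit-a-line style feed/fetch names:

```python
import numpy as np
from paddle_serving_app.local_predict import LocalPredictor

predictor = LocalPredictor()
# CPU path (the new else-branch above); the GPU path would instead pass
# use_gpu=True together with a gpu_id.
predictor.load_model_config("uci_housing_model", use_gpu=False)

# Placeholder input: one 13-feature sample for the uci_housing model.
x = np.zeros((13,), dtype="float32")
fetch_map = predictor.predict(feed={"x": x}, fetch=["price"])
print(fetch_map)
```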