提交 49fdcbea 编写于 作者: C Channingss

support encryption

上级 0f72bbfe
......@@ -5,10 +5,12 @@ option(WITH_MKL "Compile demo with MKL/OpenBlas support,defaultuseMKL."
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." ON)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." OFF)
option(WITH_TENSORRT "Compile demo with TensorRT." OFF)
option(WITH_ENCRYPTION "Compile demo with encryption tool." OFF)
# Third-party locations; override from the command line with -D<NAME>=<path>.
# Dropped the duplicate SET(TENSORRT_DIR ...) line: a second CACHE set of an
# already-defined cache variable is a no-op in CMake.
SET(TENSORRT_DIR "" CACHE PATH "Location of libraries")
SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
# Fixed missing space: the original `SET(ENCRYPTION_DIR"" ...)` fused the
# variable name and the empty value into one token.
SET(ENCRYPTION_DIR "" CACHE PATH "Location of libraries")
SET(CUDA_LIB "" CACHE PATH "Location of libraries")
# Collect all build artifacts under <build>/lib.
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
......@@ -29,6 +31,10 @@ macro(safe_set_static_flag)
endforeach(flag_var)
endmacro()
# Propagate the encryption switch to the compiler so C++ sources can guard
# decrypt-specific code paths with `#ifdef WITH_ENCRYPTION`.
# NOTE(review): directory-scoped add_definitions leaks to every target below;
# this matches the file's existing convention (see WITH_MKL) — confirm before
# migrating to target_compile_definitions.
if (WITH_ENCRYPTION)
add_definitions( -DWITH_ENCRYPTION=${WITH_ENCRYPTION})
endif()
# Tell the sources to use MKL-backed math when the MKL option is enabled.
if (WITH_MKL)
ADD_DEFINITIONS(-DUSE_MKL)
endif()
......@@ -209,6 +215,16 @@ if(WITH_GPU)
endif()
endif()
# Wire the model-decryption SDK into the build: headers, library search path,
# and the shared decrypt library appended to the DEPS link list.
# The tool only ships Linux binaries, so fail fast on Windows.
if(WITH_ENCRYPTION)
  if(NOT WIN32)
    include_directories("${ENCRYPTION_DIR}/include")
    link_directories("${ENCRYPTION_DIR}/lib")
    # Quote the path so a user-supplied ENCRYPTION_DIR containing spaces
    # does not split into multiple list elements.
    set(DEPS ${DEPS} "${ENCRYPTION_DIR}/lib/libpmodel-decrypt${CMAKE_SHARED_LIBRARY_SUFFIX}")
  else()
    message(FATAL_ERROR "The encryption tool does not support Windows.")
  endif()
endif()
if (NOT WIN32)
set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
set(DEPS ${DEPS} ${EXTERNAL_LIB})
......
......@@ -32,6 +32,11 @@
#include "include/paddlex/results.h"
#include "include/paddlex/transforms.h"
#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"
#include "model_code.h"
#endif
namespace PaddleX {
class Model {
......@@ -39,14 +44,16 @@ class Model {
// Initialize the model by building its inference predictor.
// Thin forwarding wrapper around create_predictor().
//
// model_dir: directory holding __model__ / __params__ / model.yml
// use_gpu:   run inference on GPU when true
// use_trt:   enable TensorRT acceleration when true
// gpu_id:    device ordinal used when use_gpu is true
// key:       decryption key for an encrypted model; the empty string
//            (default) means the model files are plain text.
// NOTE(review): the diff-flattened source contained both the old 4-arg and
// new 5-arg forms; this is the post-change 5-arg version.
void Init(const std::string& model_dir,
          bool use_gpu = false,
          bool use_trt = false,
          int gpu_id = 0,
          std::string key = "") {
  create_predictor(model_dir, use_gpu, use_trt, gpu_id, key);
}
// Build the Paddle inference predictor from the files in model_dir.
// When `key` is non-empty (and the binary was built WITH_ENCRYPTION),
// the model is decrypted in memory before loading.
// NOTE(review): reconstructed 5-parameter post-change declaration; the
// flattened diff showed both the old `int gpu_id = 0);` terminator and
// the new trailing parameters.
void create_predictor(const std::string& model_dir,
                      bool use_gpu = false,
                      bool use_trt = false,
                      int gpu_id = 0,
                      std::string key = "");
// Parse model.yml inside model_dir; returns false when parsing fails.
bool load_config(const std::string& model_dir);
......
......@@ -16,6 +16,11 @@ CUDA_LIB=/path/to/cuda/lib/
# Path to the cuDNN libraries
CUDNN_LIB=/path/to/cudnn/lib/
# Whether to load an encrypted model
WITH_ENCRYPTION=OFF
# Path to the encryption tool
ENCRYPTION_DIR=/path/to/encryption_tool/
# OpenCV path; no change needed when using the bundled prebuilt version
OPENCV_DIR=$(pwd)/deps/opencv3gcc4.8/
sh $(pwd)/scripts/bootstrap.sh
......@@ -28,10 +33,12 @@ cmake .. \
-DWITH_GPU=${WITH_GPU} \
-DWITH_MKL=${WITH_MKL} \
-DWITH_TENSORRT=${WITH_TENSORRT} \
-DWITH_ENCRYPTION=${WITH_ENCRYPTION} \
-DTENSORRT_DIR=${TENSORRT_DIR} \
-DPADDLE_DIR=${PADDLE_DIR} \
-DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
-DCUDA_LIB=${CUDA_LIB} \
-DCUDNN_LIB=${CUDNN_LIB} \
-DENCRYPTION_DIR=${ENCRYPTION_DIR} \
-DOPENCV_DIR=${OPENCV_DIR}
make
......@@ -19,7 +19,8 @@ namespace PaddleX {
void Model::create_predictor(const std::string& model_dir,
bool use_gpu,
bool use_trt,
int gpu_id) {
int gpu_id,
std::string key) {
// 读取配置文件
if (!load_config(model_dir)) {
std::cerr << "Parse file 'model.yml' failed!" << std::endl;
......@@ -28,7 +29,14 @@ void Model::create_predictor(const std::string& model_dir,
paddle::AnalysisConfig config;
std::string model_file = model_dir + OS_PATH_SEP + "__model__";
std::string params_file = model_dir + OS_PATH_SEP + "__params__";
config.SetModel(model_file, params_file);
#ifdef WITH_ENCRYPTION
if (key != ""){
paddle_security_load_model(&config, key.c_str(), model_file.c_str(), params_file.c_str());
}
#endif
if (key == ""){
config.SetModel(model_file, params_file);
}
if (use_gpu) {
config.EnableUseGpu(100, gpu_id);
} else {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册