Commit 7f6898fa, authored by FlyingQianMM

set with_encrypt to true

Parent f7d9b7fb
# download pre-compiled paddle encrypt
ENCRYPTION_URL=https://bj.bcebos.com/paddlex/tools/paddlex-encryption.zip
if [ ! -d "./paddlex-encryption" ]; then
    wget -c ${ENCRYPTION_URL}
    unzip paddlex-encryption.zip
    rm -rf paddlex-encryption.zip
fi
# download pre-compiled opencv lib
OPENCV_URL=https://paddleseg.bj.bcebos.com/deploy/docker/opencv3gcc4.8.tar.bz2
if [ ! -d "./deps/opencv3gcc4.8" ]; then
......
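Both dependencies are fetched with the same download-if-absent pattern: the archive is only pulled and unpacked when the target directory is missing, so re-running the script is cheap. Below is a minimal hedged variant of the encryption-tool step that fails loudly if extraction does not produce the expected directory; the set -e line and the final existence check are assumptions about desired error handling, not behavior of the repository script.

# Sketch: same download-if-absent step, failing loudly when extraction
# does not yield the expected directory. "set -e" and the final check
# are assumptions, not part of the original script.
set -e
ENCRYPTION_URL=https://bj.bcebos.com/paddlex/tools/paddlex-encryption.zip
if [ ! -d "./paddlex-encryption" ]; then
    wget -c ${ENCRYPTION_URL}
    unzip paddlex-encryption.zip
    rm -rf paddlex-encryption.zip
    [ -d "./paddlex-encryption" ] || { echo "extraction failed" >&2; exit 1; }
fi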
 # Whether to use GPU (i.e., whether to use CUDA)
 WITH_GPU=OFF
 # Use MKL or OpenBLAS
 WITH_MKL=ON
 # Whether to integrate TensorRT (only effective when WITH_GPU=ON)
 WITH_TENSORRT=OFF
-# TensorRT lib path
+# TensorRT path
 TENSORRT_DIR=/path/to/TensorRT/
 # Path to the Paddle inference library
-PADDLE_DIR=/path/to/fluid_inference/
+PADDLE_DIR=/docker/jiangjiajun/PaddleDetection/deploy/cpp/fluid_inference
 # Whether the Paddle inference library is compiled as a static library
 # When using TensorRT, the Paddle inference library is usually a dynamic library
 WITH_STATIC_LIB=OFF
 # CUDA lib path
-CUDA_LIB=/path/to/cuda/lib/
+CUDA_LIB=/usr/local/cuda/lib64
 # cuDNN lib path
-CUDNN_LIB=/path/to/cudnn/lib/
+CUDNN_LIB=/usr/local/cuda/lib64
 # Whether to load an encrypted model
-WITH_ENCRYPTION=OFF
+WITH_ENCRYPTION=ON
-# Path to the encryption tool
-ENCRYPTION_DIR=/path/to/encryption_tool/
+# Path to the encryption tool; no change needed when using the bundled pre-compiled version
+sh $(pwd)/scripts/bootstrap.sh # download the pre-compiled encryption tool
+ENCRYPTION_DIR=$(pwd)/paddlex-encryption
 # OpenCV path; no change needed when using the bundled pre-compiled version
-OPENCV_DIR=$(pwd)/deps/opencv3gcc4.8/
-sh $(pwd)/scripts/bootstrap.sh
+sh $(pwd)/scripts/bootstrap.sh # download the pre-compiled OpenCV
+OPENCV_DIR=$(pwd)/deps/opencv3gcc4.8/
 # No changes needed below
 rm -rf build
 mkdir -p build
 cd build
 cmake .. \
     -DWITH_GPU=${WITH_GPU} \
     -DWITH_MKL=${WITH_MKL} \
     -DWITH_TENSORRT=${WITH_TENSORRT} \
     -DWITH_ENCRYPTION=${WITH_ENCRYPTION} \
     -DTENSORRT_DIR=${TENSORRT_DIR} \
     -DPADDLE_DIR=${PADDLE_DIR} \
     -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
     -DCUDA_LIB=${CUDA_LIB} \
     -DCUDNN_LIB=${CUDNN_LIB} \
     -DENCRYPTION_DIR=${ENCRYPTION_DIR} \
     -DOPENCV_DIR=${OPENCV_DIR}
 make
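With the variables above set, the script wipes and recreates build/, then runs cmake and make from there. A hypothetical invocation, assuming the snippet is saved as scripts/build.sh and run from the cpp deployment directory (both the path and the filename are assumptions):

# Hypothetical invocation; the directory and script name are assumptions.
cd PaddleX/deploy/cpp
sh scripts/build.sh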
@@ -31,6 +31,8 @@ void Model::create_predictor(const std::string& model_dir,
   std::string params_file = model_dir + OS_PATH_SEP + "__params__";
 #ifdef WITH_ENCRYPTION
   if (key != ""){
+    model_file = model_dir + OS_PATH_SEP + "__model__.encrypted";
+    params_file = model_dir + OS_PATH_SEP + "__params__.encrypted";
     paddle_security_load_model(&config, key.c_str(), model_file.c_str(), params_file.c_str());
   }
 #endif
......
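When the binary is built with WITH_ENCRYPTION and a non-empty key is supplied at load time, the loader swaps the plain __model__/__params__ files for their .encrypted counterparts and passes them to paddle_security_load_model for decryption. A quick sanity check that an encrypted model directory has the layout this branch expects (the directory path below is a placeholder):

# Placeholder path; lists the artifacts the encrypted branch loads.
MODEL_DIR=/path/to/encrypted_model
ls "$MODEL_DIR/__model__.encrypted" "$MODEL_DIR/__params__.encrypted"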