From 7f6898fadce729d539ee59ee9c604986424c0c08 Mon Sep 17 00:00:00 2001
From: FlyingQianMM <245467267@qq.com>
Date: Tue, 19 May 2020 06:22:39 +0000
Subject: [PATCH] set with_encrypt to true

---
 deploy/cpp/scripts/bootstrap.sh |  8 +++
 deploy/cpp/scripts/build.sh     | 89 +++++++++++++++++----------------
 deploy/cpp/src/paddlex.cpp      |  2 +
 3 files changed, 55 insertions(+), 44 deletions(-)

diff --git a/deploy/cpp/scripts/bootstrap.sh b/deploy/cpp/scripts/bootstrap.sh
index f9fc1d1..283d759 100644
--- a/deploy/cpp/scripts/bootstrap.sh
+++ b/deploy/cpp/scripts/bootstrap.sh
@@ -1,3 +1,11 @@
+# download pre-compiled paddle encrypt
+ENCRYPTION_URL=https://bj.bcebos.com/paddlex/tools/paddlex-encryption.zip
+if [ ! -d "./paddlex-encryption" ]; then
+    wget -c ${ENCRYPTION_URL}
+    unzip paddlex-encryption.zip
+    rm -rf paddlex-encryption.zip
+fi
+
 # download pre-compiled opencv lib
 OPENCV_URL=https://paddleseg.bj.bcebos.com/deploy/docker/opencv3gcc4.8.tar.bz2
 if [ ! -d "./deps/opencv3gcc4.8" ]; then
diff --git a/deploy/cpp/scripts/build.sh b/deploy/cpp/scripts/build.sh
index 5201928..74ab96a 100644
--- a/deploy/cpp/scripts/build.sh
+++ b/deploy/cpp/scripts/build.sh
@@ -1,44 +1,45 @@
-# 是否使用GPU(即是否使用 CUDA)
-WITH_GPU=OFF
-# 使用MKL or openblas
-WITH_MKL=ON
-# 是否集成 TensorRT(仅WITH_GPU=ON 有效)
-WITH_TENSORRT=OFF
-# TensorRT 的lib路径
-TENSORRT_DIR=/path/to/TensorRT/
-# Paddle 预测库路径
-PADDLE_DIR=/path/to/fluid_inference/
-# Paddle 的预测库是否使用静态库来编译
-# 使用TensorRT时,Paddle的预测库通常为动态库
-WITH_STATIC_LIB=OFF
-# CUDA 的 lib 路径
-CUDA_LIB=/path/to/cuda/lib/
-# CUDNN 的 lib 路径
-CUDNN_LIB=/path/to/cudnn/lib/
-
-# 是否加载加密后的模型
-WITH_ENCRYPTION=OFF
-# 加密工具的路径
-ENCRYPTION_DIR=/path/to/encryption_tool/
-
-# OPENCV 路径, 如果使用自带预编译版本可不修改
-OPENCV_DIR=$(pwd)/deps/opencv3gcc4.8/
-sh $(pwd)/scripts/bootstrap.sh
-
-# 以下无需改动
-rm -rf build
-mkdir -p build
-cd build
-cmake .. \
-    -DWITH_GPU=${WITH_GPU} \
-    -DWITH_MKL=${WITH_MKL} \
-    -DWITH_TENSORRT=${WITH_TENSORRT} \
-    -DWITH_ENCRYPTION=${WITH_ENCRYPTION} \
-    -DTENSORRT_DIR=${TENSORRT_DIR} \
-    -DPADDLE_DIR=${PADDLE_DIR} \
-    -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
-    -DCUDA_LIB=${CUDA_LIB} \
-    -DCUDNN_LIB=${CUDNN_LIB} \
-    -DENCRYPTION_DIR=${ENCRYPTION_DIR} \
-    -DOPENCV_DIR=${OPENCV_DIR}
-make
+# 是否使用GPU(即是否使用 CUDA)
+WITH_GPU=OFF
+# 使用MKL or openblas
+WITH_MKL=ON
+# 是否集成 TensorRT(仅WITH_GPU=ON 有效)
+WITH_TENSORRT=OFF
+# TensorRT 的路径
+TENSORRT_DIR=/path/to/TensorRT/
+# Paddle 预测库路径
+PADDLE_DIR=/docker/jiangjiajun/PaddleDetection/deploy/cpp/fluid_inference
+# Paddle 的预测库是否使用静态库来编译
+# 使用TensorRT时,Paddle的预测库通常为动态库
+WITH_STATIC_LIB=OFF
+# CUDA 的 lib 路径
+CUDA_LIB=/usr/local/cuda/lib64
+# CUDNN 的 lib 路径
+CUDNN_LIB=/usr/local/cuda/lib64
+
+# 是否加载加密后的模型
+WITH_ENCRYPTION=ON
+# 加密工具的路径, 如果使用自带预编译版本可不修改
+sh $(pwd)/scripts/bootstrap.sh # 下载预编译版本的加密工具
+ENCRYPTION_DIR=$(pwd)/paddlex-encryption
+
+# OPENCV 路径, 如果使用自带预编译版本可不修改
+sh $(pwd)/scripts/bootstrap.sh # 下载预编译版本的opencv
+OPENCV_DIR=$(pwd)/deps/opencv3gcc4.8/
+
+# 以下无需改动
+rm -rf build
+mkdir -p build
+cd build
+cmake .. \
+    -DWITH_GPU=${WITH_GPU} \
+    -DWITH_MKL=${WITH_MKL} \
+    -DWITH_TENSORRT=${WITH_TENSORRT} \
+    -DWITH_ENCRYPTION=${WITH_ENCRYPTION} \
+    -DTENSORRT_DIR=${TENSORRT_DIR} \
+    -DPADDLE_DIR=${PADDLE_DIR} \
+    -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
+    -DCUDA_LIB=${CUDA_LIB} \
+    -DCUDNN_LIB=${CUDNN_LIB} \
+    -DENCRYPTION_DIR=${ENCRYPTION_DIR} \
+    -DOPENCV_DIR=${OPENCV_DIR}
+make
diff --git a/deploy/cpp/src/paddlex.cpp b/deploy/cpp/src/paddlex.cpp
index d10c3fd..fb7c12c 100644
--- a/deploy/cpp/src/paddlex.cpp
+++ b/deploy/cpp/src/paddlex.cpp
@@ -31,6 +31,8 @@ void Model::create_predictor(const std::string& model_dir,
   std::string params_file = model_dir + OS_PATH_SEP + "__params__";
 #ifdef WITH_ENCRYPTION
   if (key != ""){
+    model_file = model_dir + OS_PATH_SEP + "__model__.encrypted";
+    params_file = model_dir + OS_PATH_SEP + "__params__.encrypted";
     paddle_security_load_model(&config, key.c_str(), model_file.c_str(), params_file.c_str());
   }
 #endif
--
GitLab