diff --git a/deploy/cpp/scripts/jetson_bootstrap.sh b/deploy/cpp/scripts/jetson_bootstrap.sh
new file mode 100644
index 0000000000000000000000000000000000000000..94d545a38abf1925567f54bc01afbba37371a5f3
--- /dev/null
+++ b/deploy/cpp/scripts/jetson_bootstrap.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Download the pre-compiled OpenCV library for Jetson (aarch64) into ./deps.
+set -e
+OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/tools/opencv3_aarch.tgz
+if [ ! -d "./deps/opencv3" ]; then
+    mkdir -p deps
+    cd deps
+    wget -c "${OPENCV_URL}"
+    tar xzvf opencv3_aarch.tgz  # .tgz is gzip-compressed, so use -z (not -j/bzip2)
+    rm -f opencv3_aarch.tgz
+    cd ..
+fi
diff --git a/deploy/cpp/scripts/jetson_build.sh b/deploy/cpp/scripts/jetson_build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..95bec3cac95be5cf686d63ec5b0f49f62e706586
--- /dev/null
+++ b/deploy/cpp/scripts/jetson_build.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Configure and build the C++ deploy demo on NVIDIA Jetson.
+set -e
+# Whether to use GPU (i.e. CUDA)
+WITH_GPU=OFF
+# Use MKL or OpenBLAS
+WITH_MKL=OFF
+# Whether to integrate TensorRT (only effective when WITH_GPU=ON)
+WITH_TENSORRT=OFF
+# Path to TensorRT; change to your actual install path if TensorRT is enabled
+TENSORRT_DIR=/root/projects/TensorRT/
+# Path to the Paddle inference library; change to your actual install path
+PADDLE_DIR=/root/projects/fluid_inference
+# Whether to link the Paddle inference library statically
+# (the library is usually a dynamic one when TensorRT is used)
+WITH_STATIC_LIB=OFF
+# CUDA lib path
+CUDA_LIB=/usr/local/cuda/lib64
+# CUDNN lib path
+CUDNN_LIB=/usr/local/cuda/lib64
+
+# Whether to load encrypted models
+WITH_ENCRYPTION=OFF
+# Path to the encryption tool; must be set when WITH_ENCRYPTION=ON
+ENCRYPTION_DIR=
+
+# OpenCV path; no need to change when using the bundled pre-built version
+sh "$(pwd)/scripts/jetson_bootstrap.sh"  # download pre-built OpenCV
+OPENCV_DIR="$(pwd)/deps/opencv3"
+
+# No need to change anything below
+rm -rf build
+mkdir -p build
+cd build
+cmake .. \
+    -DWITH_GPU=${WITH_GPU} \
+    -DWITH_MKL=${WITH_MKL} \
+    -DWITH_TENSORRT=${WITH_TENSORRT} \
+    -DWITH_ENCRYPTION=${WITH_ENCRYPTION} \
+    -DTENSORRT_DIR=${TENSORRT_DIR} \
+    -DPADDLE_DIR=${PADDLE_DIR} \
+    -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
+    -DCUDA_LIB=${CUDA_LIB} \
+    -DCUDNN_LIB=${CUDNN_LIB} \
+    -DENCRYPTION_DIR=${ENCRYPTION_DIR} \
+    -DOPENCV_DIR=${OPENCV_DIR}
+make