diff --git a/docker/build/installers/install_nvidia_ml_for_jetson.sh b/docker/build/installers/install_nvidia_ml_for_jetson.sh
index f960e2e778c9baa8bd62176e79796b59da661d01..4092dab62c7f20c38d86f32f74ee73ed780a0dae 100755
--- a/docker/build/installers/install_nvidia_ml_for_jetson.sh
+++ b/docker/build/installers/install_nvidia_ml_for_jetson.sh
@@ -22,6 +22,8 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
 # Fail on first error.
 set -e
 
+TENSORRT_NEEDED=0
+
 ##===== Install CUDA 10.2 =====##
 VERSION_1="10-2"
 VERSION_2="10.2.89"
@@ -66,19 +68,29 @@ for pkg in ${CUDNN_PKGS}; do
 done
 
 for pkg in ${CUDNN_PKGS}; do
-    sudo dpkg -i "${pkg}"
+    dpkg -i "${pkg}"
     rm -rf ${pkg}
 done
 
 info "Successfully installed CUDNN ${MAJOR}"
 
-##===== Install TensorRT 7 =====##
+# Install pre-reqs for TensorRT from local CUDA repo
 
-# Install PreReqs from local CUDA repo
 apt-get -y install \
     libcublas10 \
     libcublas-dev
 
+# Kick the ladder and cleanup
+apt-get -y purge "cuda-repo-l4t-${VERSION_1}-local-${VERSION_2}"
+
+apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+if [ "${TENSORRT_NEEDED}" -eq 0 ]; then
+    exit 0
+fi
+
+CUDA_VER="10.2"
 TRT_VER1="7.1.0-1"
 MAJOR="${TRT_VER1%%.*}"
 TRT_VERSION="${TRT_VER1}+cuda${CUDA_VER}"
@@ -107,7 +119,7 @@ dpkg -i ${TRT_PKGS}
 
 info "Successfully installed TensorRT ${MAJOR}"
 
-# Kick the ladder and cleanup
-apt-get -y purge "cuda-repo-l4t-${VERSION_1}-local-${VERSION_2}"
+rm -rf ${TRT_PKGS}
+
 apt-get clean && \
     rm -rf /var/lib/apt/lists/*
diff --git a/docker/build/installers/install_tensorrt.sh b/docker/build/installers/install_tensorrt.sh
index 3686a1a479c709f3bec71a7ea5c399f3b370e2fc..d740d579c86e029e95be6fc8a052aa279272e0e8 100755
--- a/docker/build/installers/install_tensorrt.sh
+++ b/docker/build/installers/install_tensorrt.sh
@@ -23,6 +23,44 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
 
 . /tmp/installers/installer_base.sh
 
+ARCH="$(uname -m)"
+
+if [ "${ARCH}" = "aarch64" ]; then
+    CUDA_VER="10.2"
+    TRT_VER1="7.1.0-1"
+    MAJOR="${TRT_VER1%%.*}"
+    TRT_VERSION="${TRT_VER1}+cuda${CUDA_VER}"
+
+    TRT_PKGS="\
+libnvinfer${MAJOR}_${TRT_VERSION}_arm64.deb \
+libnvinfer-bin_${TRT_VERSION}_arm64.deb \
+libnvinfer-dev_${TRT_VERSION}_arm64.deb \
+libnvinfer-plugin${MAJOR}_${TRT_VERSION}_arm64.deb \
+libnvinfer-plugin-dev_${TRT_VERSION}_arm64.deb \
+libnvonnxparsers${MAJOR}_${TRT_VERSION}_arm64.deb \
+libnvonnxparsers-dev_${TRT_VERSION}_arm64.deb \
+libnvparsers${MAJOR}_${TRT_VERSION}_arm64.deb \
+libnvparsers-dev_${TRT_VERSION}_arm64.deb \
+"
+
+    # tensorrt_7.1.0.16-1+cuda10.2_arm64.deb
+    # libnvinfer-doc_${TRT_VERSION}_all.deb
+    # libnvinfer-samples_${TRT_VERSION}_all.deb
+
+    for pkg in ${TRT_PKGS}; do
+        info "Downloading ${LOCAL_HTTP_ADDR}/${pkg}"
+        wget "${LOCAL_HTTP_ADDR}/${pkg}"
+    done
+
+    dpkg -i ${TRT_PKGS}
+
+    info "Successfully installed TensorRT ${MAJOR}"
+
+    rm -rf ${TRT_PKGS}
+    apt-get clean
+    exit 0
+fi
+
 #Install the TensorRT package that fits your particular needs.
 #For only running TensorRT C++ applications:
 #sudo apt-get install libnvinfer7 libnvonnxparsers7 libnvparsers7 libnvinfer-plugin7
@@ -59,9 +97,15 @@ else
         libnvinfer-plugin-dev
 fi
 
-# Make caffe-1.0 compilation pass
+# FIXME(all):
+# Previously soft sym-linked for successful caffe-1.0 compilation.
+# Now that caffe-1.0 is retired, do we still need this?
+# Minor changes required:
+# 1) cudnn major version hard-code fix: v7,v8,...
+# 2) move to cudnn installer section
+
 CUDNN_HEADER_DIR="/usr/include/$(uname -m)-linux-gnu"
-[[ -e "${CUDNN_HEADER_DIR}/cudnn.h" ]] || \
+[ -e "${CUDNN_HEADER_DIR}/cudnn.h" ] || \
     ln -s "${CUDNN_HEADER_DIR}/cudnn_v7.h" "${CUDNN_HEADER_DIR}/cudnn.h"
 
 # Disable nvidia apt sources.list settings to speed up build process
diff --git a/docker/build/tegra_cyber.aarch64.dockerfile b/docker/build/tegra_cyber.aarch64.dockerfile
index 64bb05a1a55552eb10ef9ae942d4701067bbb1c1..511c10322ff51d8072e846d285e3b9159b303bab 100644
--- a/docker/build/tegra_cyber.aarch64.dockerfile
+++ b/docker/build/tegra_cyber.aarch64.dockerfile
@@ -7,22 +7,29 @@ ARG INSTALL_MODE
 LABEL version="1.2"
 
 ENV DEBIAN_FRONTEND=noninteractive
-ENV PATH /opt/apollo/sysroot/bin:$PATH
-
-COPY installers /tmp/installers
-COPY rcfiles /opt/apollo/rcfiles
-# Pre-downloaded tarballs
-COPY archive /tmp/archive
-
-RUN bash /tmp/installers/install_minimal_environment.sh ${GEOLOC}
-RUN bash /tmp/installers/install_bazel.sh
-RUN bash /tmp/installers/install_cmake.sh ${INSTALL_MODE}
-RUN bash /tmp/installers/install_llvm_clang.sh
-RUN bash /tmp/installers/install_cyber_deps.sh
-RUN bash /tmp/installers/install_qa_tools.sh
-
-RUN bash /tmp/installers/install_visualizer_deps.sh
-RUN bash /tmp/installers/post_install.sh ${BUILD_STAGE}
+ENV PATH /opt/apollo/sysroot/bin:$PATH
 
-WORKDIR /apollo
+COPY installers/installer_base.sh /tmp/installers/
+COPY installers/install_nvidia_ml_for_jetson.sh /tmp/installers/
+COPY rcfiles/apollo.sh.sample /opt/apollo/rcfiles/
+
+RUN bash /tmp/installers/install_nvidia_ml_for_jetson.sh
+
+#COPY installers /tmp/installers
+#COPY rcfiles /opt/apollo/rcfiles
+## Pre-downloaded tarballs
+#COPY archive /tmp/archive
+#
+#RUN bash /tmp/installers/install_minimal_environment.sh ${GEOLOC}
+#RUN bash /tmp/installers/install_bazel.sh
+#RUN bash /tmp/installers/install_cmake.sh ${INSTALL_MODE}
+#RUN bash /tmp/installers/install_llvm_clang.sh
+#
+#RUN bash /tmp/installers/install_cyber_deps.sh
+#RUN bash /tmp/installers/install_qa_tools.sh
+#
+#RUN bash /tmp/installers/install_visualizer_deps.sh
+#RUN bash /tmp/installers/post_install.sh ${BUILD_STAGE}
+#
+#WORKDIR /apollo