Commit e63e8538 authored by Megvii Engine Team

feat(whl/windows): refine windows build env

GitOrigin-RevId: 26d2a7db70ebde5a1f05c3b32ba4efcf64701764
Parent 0951e416
......@@ -755,8 +755,11 @@ if(MGE_WITH_CUDA)
OR ${MGE_ARCH} STREQUAL "i386"
OR ${MGE_ARCH} STREQUAL "aarch64")
set(MEGDNN_THREADS_512 0)
# On the Windows platform the static library is just a shell and always falls back to the DLL
if(MGE_WITH_CUDA
AND MGE_CUDA_USE_STATIC
AND NOT MSVC
AND NOT WIN32
AND ("${CUDNN_VERSION}" VERSION_GREATER "8.0.0" OR "${CUDNN_VERSION}"
VERSION_EQUAL "8.0.0")
AND (NOT MGE_WITH_CUDNN_SHARED))
......
......@@ -28,9 +28,6 @@ CUresult on_init_failed(int func_idx) {
#define _WRAPLIB_API_CALL CUDAAPI
#define _WRAPLIB_CALLBACK CUDA_CB
#if CUDA_VERSION == 10010
#include "./libcuda-wrap_10.1.h"
//! some symbols are linked from the cuda lib but used in other modules, so export them here
#ifdef WIN32
#pragma comment(linker, "/export:cudaSetDevice")
......@@ -41,18 +38,11 @@ CUresult on_init_failed(int func_idx) {
#pragma comment(linker, "/export:cudaGetDevice")
#pragma comment(linker, "/export:cudaDeviceSynchronize")
#endif
#if CUDA_VERSION == 10010
#include "./libcuda-wrap_10.1.h"
#elif CUDA_VERSION == 10020
#include "./libcuda-wrap_10.2.h"
//! some symbols are linked from the cuda lib but used in other modules, so export them here
#ifdef WIN32
#pragma comment(linker, "/export:cudaSetDevice")
#pragma comment(linker, "/export:cuCtxGetCurrent")
#pragma comment(linker, "/export:cudaGetDeviceCount")
#pragma comment(linker, "/export:cudaGetDeviceProperties")
#pragma comment(linker, "/export:cudaRuntimeGetVersion")
#pragma comment(linker, "/export:cudaGetDevice")
#pragma comment(linker, "/export:cudaDeviceSynchronize")
#endif
#elif CUDA_VERSION == 11010
#include "./libcuda-wrap_11.1.h"
#elif CUDA_VERSION == 11020
......@@ -65,7 +55,6 @@ CUresult on_init_failed(int func_idx) {
#error "cuda stub not support this cuda version, you can close cuda stub to passby"
#endif
#undef _WRAPLIB_CALLBACK
#undef _WRAPLIB_API_CALL
......
......@@ -628,6 +628,9 @@ class FileBaton:
def _build_with_ninja(build_dir: str, verbose: bool, error_prefix: str):
command = ["ninja", "-v"]
# cuda 10.1 is not compatible with the latest VC compiler, so we need to specify the VC compiler version; please refer to CVARS_VER_NEED in scripts/whl/windows/config.sh
# for example, put vcvarsall.bat on PATH, then modify the command to
# command = ["vcvarsall.bat", "x64", "-vcvars_ver=14.26.28801", "&&", "ninja", "-v"]
env = os.environ.copy()
try:
sys.stdout.flush()
......
pytest==5.3.0 ; python_version <= '3.9'
pytest==6.2.5 ; python_version > '3.9'
pytest-sphinx==0.3.1
pytest-json-report
tensorboardX==2.4
protobuf==3.20.0 ; python_version > '3.8'
six==1.16.0
......
......@@ -49,61 +49,29 @@ But some dependencies need to be installed manually:
### Windows host build
* commands:
```
0: about all windows config
* please check scripts/whl/windows/config.sh
1: install git (Windows GUI)
* download git-install.exe from https://git-scm.com/download/win
* only the git-lfs component needs to be selected
* install to the default dir: /c/Program\ Files/Git
2: install Visual Studio 2019 Enterprise (Windows GUI)
* download the installer exe from https://visualstudio.microsoft.com
* choose "C++ development" -> select the cmake/MSVC/windows-sdk components during install
* NOTICE: MSVC toolset versions >= 14.28.29910 are not compatible with CUDA 10.1, please
choose a version < 14.28.29910
* then install the chosen components
3: install LLVM from https://releases.llvm.org/download.html (Windows GUI)
* the LLVM installed by Visual Studio has some issues, e.g. link crashes on large projects, please use the official release
* download the installer exe from https://releases.llvm.org/download.html
* our CI uses LLVM 12.0.1; if you install another version, please modify LLVM_PATH
* install 12.0.1 to /c/Program\ Files/LLVM_12_0_1
4: install python3 (Windows GUI)
* download the python 64-bit installer exe (we support python3.6-python3.10 now)
https://www.python.org/ftp/python/3.6.8/python-3.6.8-amd64.exe
https://www.python.org/ftp/python/3.7.7/python-3.7.7-amd64.exe
https://www.python.org/ftp/python/3.8.3/python-3.8.3-amd64.exe
https://www.python.org/ftp/python/3.9.4/python-3.9.4-amd64.exe
https://www.python.org/ftp/python/3.10.1/python-3.10.1-amd64.exe
* install 3.6.8 to /c/Users/${USER}/mge_whl_python_env/3.6.8
* install 3.7.7 to /c/Users/${USER}/mge_whl_python_env/3.7.7
* install 3.8.3 to /c/Users/${USER}/mge_whl_python_env/3.8.3
* install 3.9.4 to /c/Users/${USER}/mge_whl_python_env/3.9.4
* install 3.10.1 to /c/Users/${USER}/mge_whl_python_env/3.10.1
* copy python.exe to python3.exe
for each dir in /c/Users/${USER}/mge_whl_python_env/*
copy python.exe to python3.exe
* install the python dependencies
for each dir in /c/Users/${USER}/mge_whl_python_env/*
python3.exe -m pip install --upgrade pip
python3.exe -m pip install -r imperative/python/requires.txt
python3.exe -m pip install -r imperative/python/requires-test.txt
5: install cuda components (Windows GUI)
* now we support cuda10.1+cudnn7.6+TensorRT6.0 on Windows
* install cuda10.1 to C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1
* install cudnn7.6 to C:\Program Files\NVIDIA GPU Computing Toolkit\cudnn-10.1-windows10-x64-v7.6.5.32
* install TensorRT6.0 to C:\Program Files\NVIDIA GPU Computing Toolkit\TensorRT-6.0.1.5
6: edit system env variables (Windows GUI)
* create new key: "VS_PATH", value: "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise"
* create new key: "LLVM_PATH", value: "C:\Program Files\LLVM_12_0_1"
2: install cuda components (Windows GUI) if you want to build with CUDA
* download cuda/cudnn/trt from NVIDIA or via scripts/whl/windows/cuda_cudnn_install.py
* export CUDA_ROOT_DIR/CUDNN_ROOT_DIR/TRT_ROOT_DIR to the real cuda/cudnn/trt locations
3: install all env except the cuda env
* just run scripts/whl/windows/env_prepare.sh
4: edit system env variables (Windows GUI)
* append "Path" env value
C:\Program Files\Git\cmd
C:\Users\build\mge_whl_python_env\3.8.3
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\bin
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\libnvvp
C:\Program Files\NVIDIA GPU Computing Toolkit\cudnn-10.1-windows10-x64-v7.6.5.32\cuda\bin
C:\Program Files\LLVM_12_0_1\lib\clang\12.0.1\lib\windows
C:\Users\build\megengine_dev_tools\pyenv-win\pyenv-win\versions\3.10.1
change `build` to your real user name (a full example invocation is shown right after this block)
```
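For reference, a minimal end-to-end invocation from a git-bash shell might look like the sketch below. The paths and the SDK/python versions are only the defaults assumed by scripts/whl/windows/config.sh; adjust them to your machine.
```bash
cd MegEngine
# one-time: install 7z/swig/python/LLVM/Visual Studio Build Tools (everything except CUDA)
./scripts/whl/windows/env_prepare.sh
# for a CUDA build, install CUDA/cuDNN/TensorRT at the locations defined in
# scripts/whl/windows/config.sh (or adjust that file); e.g. the cu118 defaults live under
#   /c/Users/${USER}/megengine_dev_tools/cuda_cudnn_trt/118/
# then build wheels for one python version against the cu118 sdk
ALL_PYTHON="3.8.3" ./scripts/whl/windows/windows_build_whl.sh -sdk cu118
```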
### Linux host build
* commands:
```
0: we provide a Dockerfile if you do not want to prepare a local env
* see scripts/whl/manylinux2014/build_image.sh
1: install CMake (version >= 3.15.2) and ninja-build
2: install gcc/g++ (version >= 6; gcc/g++ >= 7 if you need to build training mode)
3: install build-essential git git-lfs gfortran libgfortran-6-dev autoconf gnupg flex bison gperf curl zlib1g-dev gcc-multilib g++-multilib lib32ncurses5-dev libxml2-utils xsltproc unzip libtool librdmacm-dev rdmacm-utils python3-dev python3-numpy texinfo
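for example, on Debian/Ubuntu this is roughly (assumption, package names may differ across distro versions):
sudo apt-get install -y build-essential git git-lfs gfortran libgfortran-6-dev autoconf gnupg flex bison gperf curl zlib1g-dev gcc-multilib g++-multilib lib32ncurses5-dev libxml2-utils xsltproc unzip libtool librdmacm-dev rdmacm-utils python3-dev python3-numpy texinfo cmake ninja-build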
......
......@@ -25,7 +25,10 @@ if [ $OS = "Darwin" ];then
fi
SRC_DIR=$($READLINK -f "`dirname $0`/../../")
source $SRC_DIR/scripts/cmake-build/utils/utils.sh
source ${SRC_DIR}/scripts/cmake-build/utils/utils.sh
if [[ $OS =~ "NT" ]]; then
source ${SRC_DIR}/scripts/whl/windows/config.sh
fi
config_ninja_default_max_jobs
echo "EXTRA_CMAKE_ARGS: ${EXTRA_CMAKE_ARGS}"
......@@ -165,28 +168,31 @@ function cmake_build() {
function windows_env_err() {
echo "check windows env failed!!"
echo "please install env refs for: scripts/cmake-build/BUILD_README.md"
echo "also refs for: scripts/whl/windows/env_prepare.sh"
exit -1
}
function prepare_env_for_windows_build() {
echo "check Visual Studio install path env..."
if [[ -z $VS_PATH ]];then
echo "can not find visual_studio_path env, pls export you Visual Studio install dir to VS_PATH"
echo "examle for export Visual Studio 2019 Enterprise default install dir"
echo "export VS_PATH=/c/Program\ Files\ \(x86\)/Microsoft\ Visual\ Studio/2019/Enterprise"
exit -1
# check VS_INSTALL_PATH is valid or not
if [ ! -d ${VS_INSTALL_PATH}/Licenses ];then
echo "can not find ${VS_INSTALL_PATH}/Licenses, pls check VS_INSTALL_PATH env"
echo "pls install VisualStudio by scripts/whl/windows/env_prepare.sh"
windows_env_err
else
echo "use ${VS_INSTALL_PATH}"
fi
echo $VS_PATH
# only use the cmake/Ninja installed by Visual Studio; otherwise the build may fail
# some user envs have cmake/clang-cl/Ninja installed in the windows-git-bash env, so we put the Visual Studio
# path at the head of PATH and check that it is the one being used
echo "check cmake install..."
export PATH=$VS_PATH/Common7/IDE/CommonExtensions/Microsoft/CMake/CMake/bin/:$PATH
export PATH=${VS_INSTALL_PATH}/Common7/IDE/CommonExtensions/Microsoft/CMake/CMake/bin/:$PATH
which cmake
cmake_loc=`which cmake`
if [[ $cmake_loc =~ "vs" ]]; then
echo "cmake valid ..."
if [[ $cmake_loc =~ ${MEGENGINE_DEV_TOOLS_PREFIX_DIR} ]]; then
echo "use cmake: $cmake_loc"
else
echo "cmake Invalid: ..."
windows_env_err
......@@ -195,51 +201,98 @@ function prepare_env_for_windows_build() {
echo "check clang-cl install..."
# the LLVM installed by Visual Studio has some issues, e.g. link crashes on large projects, so we
# use the official LLVM downloaded from https://releases.llvm.org/download.html
if [[ -z ${LLVM_PATH} ]];then
echo "can not find LLVM_PATH env, pls export you LLVM install dir to LLVM_PATH"
echo "examle for export LLVM_12_0_1"
echo "export LLVM_PATH=/c/Program\ Files/LLVM_12_0_1"
exit -1
# check LLVM_MEGENGINE_DEV_DIR is valid or not
if [ ! -f ${LLVM_MEGENGINE_DEV_DIR}/bin/clang-cl.exe ];then
echo "can not find ${LLVM_MEGENGINE_DEV_DIR}/bin/clang-cl.exe, pls check LLVM_MEGENGINE_DEV_DIR env"
echo "pls install LLVM by scripts/whl/windows/env_prepare.sh"
windows_env_err
else
echo "use ${LLVM_MEGENGINE_DEV_DIR}"
fi
echo ${LLVM_PATH}
export PATH=${LLVM_PATH}/bin/:$PATH
echo ${LLVM_MEGENGINE_DEV_DIR}
export PATH=${LLVM_MEGENGINE_DEV_DIR}/bin/:$PATH
clang_loc=`which clang-cl`
if [[ $clang_loc =~ "Visual" ]]; then
if [[ $clang_loc =~ ${VS_INSTALL_PATH} ]]; then
echo "clang-cl Invalid: we do not support use LLVM installed by Visual Studio"
windows_env_err
else
echo "clang-cl valid ..."
fi
if [[ $clang_loc =~ ${LLVM_MEGENGINE_DEV_DIR} ]]; then
echo "use clang-cl : $clang_loc"
else
echo "clang-cl Invalid: ..."
windows_env_err
fi
echo "check Ninja install..."
export PATH=$VS_PATH/Common7/IDE/CommonExtensions/Microsoft/CMake/Ninja/:$PATH
export PATH=${VS_INSTALL_PATH}/Common7/IDE/CommonExtensions/Microsoft/CMake/Ninja/:$PATH
which Ninja
ninja_loc=`which Ninja`
if [[ $ninja_loc =~ "vs" ]]; then
echo "Ninja valid ..."
if [[ $ninja_loc =~ ${MEGENGINE_DEV_TOOLS_PREFIX_DIR} ]]; then
echo "use Ninja: $ninja_loc"
else
echo "Ninja Invalid: ..."
windows_env_err
fi
echo "put vcvarsall.bat path to PATH env.."
export PATH=$VS_PATH/VC/Auxiliary/Build:$PATH
# check vcvarsall.bat is valid or not
if [ ! -f ${VS_INSTALL_PATH}/VC/Auxiliary/Build/vcvarsall.bat ];then
echo "can not find ${VS_INSTALL_PATH}/VC/Auxiliary/Build/vcvarsall.bat, pls check VS_INSTALL_PATH env"
echo "pls install VisualStudio by scripts/whl/windows/env_prepare.sh"
windows_env_err
else
echo "use ${VS_INSTALL_PATH}"
fi
export PATH=$VS_INSTALL_PATH/VC/Auxiliary/Build:$PATH
if [ $MGE_WITH_CUDA = "ON" ];then
echo "config cuda/cudnn/TensorRT env..."
if [[ -z ${CUDA_ROOT_DIR} ]]; then
echo "CUDA_ROOT_DIR is not set, use default: ${CUDA_DFT_ROOT}"
export CUDA_ROOT_DIR=${CUDA_DFT_ROOT}
fi
# check CUDA_ROOT_DIR is valid or not
if [ ! -f "${CUDA_ROOT_DIR}/bin/nvcc.exe" ];then
echo "can not find ${CUDA_ROOT_DIR}/bin/nvcc.exe, pls check env"
windows_env_err
else
echo "use CUDA_ROOT_DIR: ${CUDA_ROOT_DIR}"
# put cuda/bin to PATH env
export PATH=${CUDA_ROOT_DIR}/bin/:$PATH
fi
echo "config cuda/cudnn/TensorRT env..."
export CUDA_PATH=$CUDA_ROOT_DIR
export PATH=:$CUDA_PATH/bin:$PATH
export CUDA_BIN_PATH=$CUDA_PATH
export PC_CUDNN_INCLUDE_DIRS=$CUDNN_ROOT_DIR/include
export LD_LIBRARY_PATH=$TRT_ROOT_DIR/lib:$CUDA_ROOT_DIR/lib/x64:$CUDNN_ROOT_DIR/lib:$CUDNN_ROOT_DIR/lib/x64:$LD_LIBRARY_PATH
export INCLUDE=$INCLUDE:$CPATH
if [[ -z ${CUDNN_ROOT_DIR} ]]; then
echo "CUDNN_ROOT_DIR is not set, use default: ${CUDNN_DFT_ROOT}"
export CUDNN_ROOT_DIR=${CUDNN_DFT_ROOT}
fi
# check CUDNN_ROOT_DIR is valid or not
if [ ! -f "${CUDNN_ROOT_DIR}/include/cudnn.h" ];then
echo "can not find ${CUDNN_ROOT_DIR}/include/cudnn.h, pls check env"
windows_env_err
else
echo "use CUDNN_ROOT_DIR: ${CUDNN_ROOT_DIR}"
fi
if [[ -z ${TRT_ROOT_DIR} ]]; then
echo "TRT_ROOT_DIR is not set, use default: ${TRT_DFT_ROOT}"
export TRT_ROOT_DIR=${TRT_DFT_ROOT}
fi
# check TRT_ROOT_DIR is valid or not
if [ ! -f "${TRT_ROOT_DIR}/include/NvInfer.h" ];then
echo "can not find ${TRT_ROOT_DIR}/include/NvInfer.h, pls check env"
windows_env_err
else
echo "use TRT_ROOT_DIR: ${TRT_ROOT_DIR}"
fi
fi
# the python version will be configured by the whl build script or ci script; we need
# a default (DFT) version so the build succeeds when host_build.sh is called directly
if [[ -z ${ALREADY_CONFIG_PYTHON_VER} ]]
then
echo "config a default python3"
DFT_PYTHON_BIN=/c/Users/${USER}/mge_whl_python_env/3.8.3
if [ ! -f "${DFT_PYTHON_BIN}/python3.exe" ]; then
echo "ERR: can not find ${DFT_PYTHON_BIN}/python3.exe , Invalid env"
windows_env_err
......@@ -250,8 +303,15 @@ function prepare_env_for_windows_build() {
fi
fi
echo "export swig pwd to PATH"
export PATH=/c/Users/${USER}/swigwin-4.0.2::$PATH
# check swig is valid or not
if [ ! -f ${SWIG_INSTALL_DIR}/swig.exe ];then
echo "can not find ${SWIG_INSTALL_DIR}/swig.exe, pls check SWIG_INSTALL_DIR env"
echo "pls install swig by scripts/whl/windows/env_prepare.sh"
windows_env_err
else
echo "use swig: ${SWIG_INSTALL_DIR}"
export PATH=${SWIG_INSTALL_DIR}:$PATH
fi
}
function cmake_build_windows() {
......@@ -288,7 +348,7 @@ function cmake_build_windows() {
export CFLAGS=-$MGE_WINDOWS_BUILD_MARCH
export CXXFLAGS=-$MGE_WINDOWS_BUILD_MARCH
cmd.exe /C " \
vcvarsall.bat $MGE_WINDOWS_BUILD_ARCH -vcvars_ver=14.26.28801 && cmake -G "Ninja" \
vcvarsall.bat $MGE_WINDOWS_BUILD_ARCH -vcvars_ver=${CVARS_VER_NEED} && cmake -G "Ninja" \
-DMGE_ARCH=$MGE_ARCH \
-DMGE_INFERENCE_ONLY=$MGE_INFERENCE_ONLY \
-DMGE_WITH_CUDA=$MGE_WITH_CUDA \
......@@ -300,7 +360,7 @@ function cmake_build_windows() {
${EXTRA_CMAKE_ARGS} ../../.. "
config_ninja_target_cmd ${NINJA_VERBOSE} ${BUILD_DEVELOP} "${SPECIFIED_TARGET}" ${NINJA_DRY_RUN} ${NINJA_MAX_JOBS}
cmd.exe /C " vcvarsall.bat $MGE_WINDOWS_BUILD_ARCH -vcvars_ver=14.26.28801 && ${NINJA_CMD} "
cmd.exe /C " vcvarsall.bat $MGE_WINDOWS_BUILD_ARCH -vcvars_ver=${CVARS_VER_NEED} && ${NINJA_CMD} "
}
if [[ $OS =~ "NT" ]]; then
......
......@@ -48,7 +48,7 @@ commands:
ALL_PYTHON="36m" ./scripts/whl/manylinux2014/build_wheel_common.sh -sdk cu101
```
* If you just want to build with cpu only version, you can set `-sdk` environment 'cpu'. such as:
* If you just want to build the CPU-only version, you can set `-sdk` to 'cpu', such as:
```bash
ALL_PYTHON="36m" ./scripts/whl/manylinux2014/build_wheel_common.sh -sdk cpu
```
......@@ -71,12 +71,12 @@ ALL_PYTHON="3.7.7" ./scripts/whl/macos/macos_build_whl.sh
* If you just want to build for a specific Python version, you can use the `ALL_PYTHON` environment variable, such as:
```bash
ALL_PYTHON="3.8.3" ./scripts/whl/windows/windows_build_whl.sh
ALL_PYTHON="3.8.3" ./scripts/whl/windows/windows_build_whl.sh -sdk cu118
```
* If you just want to build with cpu only version, you can set `BUILD_WHL_CPU_ONLY` environment 'ON'. such as:
* If you just want to build the CPU-only version, you can set `-sdk` to 'cpu', such as:
```
BUILD_WHL_CPU_ONLY="ON" ALL_PYTHON="3.8.3" ./scripts/whl/windows/windows_build_whl.sh
ALL_PYTHON="3.8.3" ./scripts/whl/windows/windows_build_whl.sh -sdk cpu
```
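* The generated wheel packages are collected under a per-SDK output directory (assumption based on `WINDOWS_WHL_HOME` in scripts/whl/windows/windows_build_whl.sh), so after a cu118 build they should be listable with, for example:
```bash
ls scripts/whl/windows/windows_whl_home/cu118/
```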
## Build for Android
......
......@@ -311,116 +311,9 @@ popd >/dev/null
cd ${CWD}
mkdir -p ${OUTPUTDIR}
if [ ${BUILD_WHL_CPU_ONLY} = "OFF" ]; then
if [[ -z ${CUDA_ROOT_DIR} ]]; then
echo "Environment variable CUDA_ROOT_DIR not set."
exit -1
fi
if [[ -z ${CUDNN_ROOT_DIR} ]]; then
echo "Environment variable CUDNN_ROOT_DIR not set."
exit -1
fi
if [[ -z ${TENSORRT_ROOT_DIR} ]]; then
echo "Environment variable TENSORRT_ROOT_DIR not set."
if [[ -z ${TRT_ROOT_DIR} ]]; then
echo "Environment variable TRT_ROOT_DIR not set."
exit -1
else
echo "put ${TRT_ROOT_DIR} to TENSORRT_ROOT_DIR env"
TENSORRT_ROOT_DIR=${TRT_ROOT_DIR}
fi
fi
## YOU SHOULD MODIFY CUDA VERSION AS BELOW WHEN UPGRADE
CUDA_ROOT_DIR_=${CUDA_ROOT_DIR%*/}
CUDNN_ROOT_DIR_=${CUDNN_ROOT_DIR%*/}
TENSORRT_ROOT_DIR_=${TENSORRT_ROOT_DIR%*/}
CUBLAS_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cublas_api.h
CUDA_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cuda.h
if [ -e ${CUDNN_ROOT_DIR_}/include/cudnn_version.h ];then
CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn_version.h
elif [ -e ${CUDNN_ROOT_DIR_}/include/cudnn.h ];then
CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn.h
else
echo "cannot determine CUDNN_VERSION_PATH from CUDNN_ROOT_DIR."
exit -1
fi
TENSORRT_VERSION_PATH=${TENSORRT_ROOT_DIR_}/include/NvInferVersion.h
if [ ! -e $CUDA_VERSION_PATH ] ; then
echo file $CUDA_VERSION_PATH is not exist
echo please check the Environment must use CUDA-$REQUIR_CUDA_VERSION
exit -1
fi
if [ ! -e $CUDNN_VERSION_PATH ] ; then
echo file $CUDNN_VERSION_PATH is not exist
echo please check the Environment must use CUDNN-V$REQUIR_CUDNN_VERSION
exit -1
fi
if [ ! -e $TENSORRT_VERSION_PATH ] ; then
echo file $TENSORRT_VERSION_PATH is not exist
echo please check the Environment must use TensorRT-$REQUIR_TENSORRT_VERSION
exit -1
fi
if [ ! -e $CUBLAS_VERSION_PATH ] ; then
echo file $CUBLAS_VERSION_PATH is not exist
exit -1
fi
CUBLAS_VERSION_CONTEXT=$(head -150 ${CUBLAS_VERSION_PATH})
CUDA_VERSION_CONTEXT=$(head -300 ${CUDA_VERSION_PATH})
CUDNN_VERSION_CONTEXT=$(head -62 ${CUDNN_VERSION_PATH})
TENSORRT_VERSION_CONTEXT=$(tail -20 ${TENSORRT_VERSION_PATH})
if [ "$REQUIR_CUDA_VERSION" -ge "11000" ];then
CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define CUDA_VERSION * +([0-9]+)")
else
CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define __CUDA_API_VERSION * +([0-9]+)")
fi
CUDA_VERSION=${CUDA_API_VERSION:0-5}
echo CUDA_VERSION:$CUDA_VERSION
CUDNN_VERSION_MAJOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MAJOR * +([0-9]+)")
CUDNN_VERSION_MINOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MINOR * +([0-9]+)")
CUDNN_VERSION_PATCH=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_PATCHLEVEL * +([0-9]+)")
CUDNN_VERSION=${CUDNN_VERSION_MAJOR:0-1}.${CUDNN_VERSION_MINOR:0-1}.${CUDNN_VERSION_PATCH:0-1}
echo CUDNN_VERSION:$CUDNN_VERSION
TENSORRT_VERSION_MAJOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MAJOR * +([0-9]+)")
TENSORRT_VERSION_MINOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MINOR * +([0-9]+)")
TENSORRT_VERSION_PATCH=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_PATCH * +([0-9]+)")
TENSORRT_VERSION_BUILD=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_BUILD * +([0-9]+)")
TENSORRT_VERSION=${TENSORRT_VERSION_MAJOR:0-1}.${TENSORRT_VERSION_MINOR:0-1}.${TENSORRT_VERSION_PATCH:0-1}.${TENSORRT_VERSION_BUILD:0-1}
echo TENSORRT_VERSION:$TENSORRT_VERSION
CUBLAS_VERSION_MAJOR=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_MAJOR * +([0-9]+)" | grep -Eo "*+([0-9]+)")
CUBLAS_VERSION_MINOR=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_MINOR * +([0-9]+)" | grep -Eo "*+([0-9]+)")
CUBLAS_VERSION_PATCH=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_PATCH * +([0-9]+)" | grep -Eo "*+([0-9]+)")
CUBLAS_VERSION_BUILD=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_BUILD * +([0-9]+)" | grep -Eo "*+([0-9]+)")
CUBLAS_VERSION=${CUBLAS_VERSION_MAJOR}.${CUBLAS_VERSION_MINOR}.${CUBLAS_VERSION_PATCH}.${CUBLAS_VERSION_BUILD}
echo CUBLAS_VERSION:$CUBLAS_VERSION
if [ $CUDA_VERSION != $REQUIR_CUDA_VERSION ] ; then
echo please check the Environment must use CUDA NO.$REQUIR_CUDA_VERSION
exit -1
fi
source ${BASEDIR}/scripts/whl/utils/utils.sh
if [ $CUDNN_VERSION != $REQUIR_CUDNN_VERSION ] ; then
echo please check the Environment must use CUDNN-V$REQUIR_CUDNN_VERSION
exit -1
fi
if [ $TENSORRT_VERSION != $REQUIR_TENSORRT_VERSION ] ; then
echo please check the Environment must use TENSORRT-$REQUIR_TENSORRT_VERSION
exit -1
fi
if [ $CUBLAS_VERSION != $REQUIR_CUBLAS_VERSION ] ; then
echo please check the Environment must use CUBLAS-$REQUIR_CUBLAS_VERSION
exit -1
fi
fi
check_cuda_cudnn_trt_version
if [[ -z ${BUILD_GCC8} ]];then
BUILD_GCC8=OFF
......
#!/usr/bin/env bash
set -e
# if you want to debug this script, please use set -ex
OS=$(uname -s)
docker_file=""
......@@ -169,3 +171,120 @@ function check_python_version_is_valid() {
fi
done
}
function check_cuda_cudnn_trt_version() {
# check cuda/cudnn/trt version
if [ ${BUILD_WHL_CPU_ONLY} = "OFF" ]; then
if [[ -z ${CUDA_ROOT_DIR} ]]; then
echo "Environment variable CUDA_ROOT_DIR not set."
exit -1
fi
if [[ -z ${CUDNN_ROOT_DIR} ]]; then
echo "Environment variable CUDNN_ROOT_DIR not set."
exit -1
fi
if [[ -z ${TENSORRT_ROOT_DIR} ]]; then
echo "Environment variable TENSORRT_ROOT_DIR not set."
if [[ -z ${TRT_ROOT_DIR} ]]; then
echo "Environment variable TRT_ROOT_DIR not set."
exit -1
else
echo "put ${TRT_ROOT_DIR} to TENSORRT_ROOT_DIR env"
TENSORRT_ROOT_DIR=${TRT_ROOT_DIR}
fi
fi
## YOU SHOULD MODIFY THE CUDA VERSION BELOW WHEN UPGRADING
CUDA_ROOT_DIR_=${CUDA_ROOT_DIR%*/}
CUDNN_ROOT_DIR_=${CUDNN_ROOT_DIR%*/}
TENSORRT_ROOT_DIR_=${TENSORRT_ROOT_DIR%*/}
CUBLAS_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cublas_api.h
CUDA_VERSION_PATH=${CUDA_ROOT_DIR_}/include/cuda.h
if [ -e ${CUDNN_ROOT_DIR_}/include/cudnn_version.h ];then
CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn_version.h
elif [ -e ${CUDNN_ROOT_DIR_}/include/cudnn.h ];then
CUDNN_VERSION_PATH=${CUDNN_ROOT_DIR_}/include/cudnn.h
else
echo "cannot determine CUDNN_VERSION_PATH from CUDNN_ROOT_DIR."
exit -1
fi
TENSORRT_VERSION_PATH=${TENSORRT_ROOT_DIR_}/include/NvInferVersion.h
if [ ! -e $CUDA_VERSION_PATH ] ; then
echo "file $CUDA_VERSION_PATH does not exist"
echo "please check the environment: it must use CUDA-$REQUIR_CUDA_VERSION"
exit -1
fi
if [ ! -e $CUDNN_VERSION_PATH ] ; then
echo "file $CUDNN_VERSION_PATH does not exist"
echo "please check the environment: it must use CUDNN-V$REQUIR_CUDNN_VERSION"
exit -1
fi
if [ ! -e $TENSORRT_VERSION_PATH ] ; then
echo "file $TENSORRT_VERSION_PATH does not exist"
echo "please check the environment: it must use TensorRT-$REQUIR_TENSORRT_VERSION"
exit -1
fi
if [ ! -e $CUBLAS_VERSION_PATH ] ; then
echo "file $CUBLAS_VERSION_PATH does not exist"
exit -1
fi
CUBLAS_VERSION_CONTEXT=$(head -150 ${CUBLAS_VERSION_PATH})
CUDA_VERSION_CONTEXT=$(head -300 ${CUDA_VERSION_PATH})
CUDNN_VERSION_CONTEXT=$(head -62 ${CUDNN_VERSION_PATH})
TENSORRT_VERSION_CONTEXT=$(tail -20 ${TENSORRT_VERSION_PATH})
if [ "$REQUIR_CUDA_VERSION" -ge "11000" ];then
CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define CUDA_VERSION * +([0-9]+)")
else
CUDA_API_VERSION=$(echo $CUDA_VERSION_CONTEXT | grep -Eo "define __CUDA_API_VERSION * +([0-9]+)")
fi
CUDA_VERSION=${CUDA_API_VERSION:0-5}
echo CUDA_VERSION:$CUDA_VERSION
CUDNN_VERSION_MAJOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MAJOR * +([0-9]+)")
CUDNN_VERSION_MINOR=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_MINOR * +([0-9]+)")
CUDNN_VERSION_PATCH=$(echo $CUDNN_VERSION_CONTEXT | grep -Eo "define CUDNN_PATCHLEVEL * +([0-9]+)")
CUDNN_VERSION=${CUDNN_VERSION_MAJOR:0-1}.${CUDNN_VERSION_MINOR:0-1}.${CUDNN_VERSION_PATCH:0-1}
echo CUDNN_VERSION:$CUDNN_VERSION
TENSORRT_VERSION_MAJOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MAJOR * +([0-9]+)")
TENSORRT_VERSION_MINOR=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_MINOR * +([0-9]+)")
TENSORRT_VERSION_PATCH=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_PATCH * +([0-9]+)")
TENSORRT_VERSION_BUILD=$(echo $TENSORRT_VERSION_CONTEXT | grep -Eo "NV_TENSORRT_BUILD * +([0-9]+)")
TENSORRT_VERSION=${TENSORRT_VERSION_MAJOR:0-1}.${TENSORRT_VERSION_MINOR:0-1}.${TENSORRT_VERSION_PATCH:0-1}.${TENSORRT_VERSION_BUILD:0-1}
echo TENSORRT_VERSION:$TENSORRT_VERSION
CUBLAS_VERSION_MAJOR=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_MAJOR * +([0-9]+)" | grep -Eo "*+([0-9]+)")
CUBLAS_VERSION_MINOR=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_MINOR * +([0-9]+)" | grep -Eo "*+([0-9]+)")
CUBLAS_VERSION_PATCH=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_PATCH * +([0-9]+)" | grep -Eo "*+([0-9]+)")
if CUBLAS_VERSION_BUILD=$(echo $CUBLAS_VERSION_CONTEXT | grep -Eo "define CUBLAS_VER_BUILD * +([0-9]+)" | grep -Eo "*+([0-9]+)"); then
CUBLAS_VERSION=${CUBLAS_VERSION_MAJOR}.${CUBLAS_VERSION_MINOR}.${CUBLAS_VERSION_PATCH}.${CUBLAS_VERSION_BUILD}
else
CUBLAS_VERSION=${CUBLAS_VERSION_MAJOR}.${CUBLAS_VERSION_MINOR}.${CUBLAS_VERSION_PATCH}
fi
echo CUBLAS_VERSION:$CUBLAS_VERSION
if [ $CUDA_VERSION != $REQUIR_CUDA_VERSION ] ; then
echo "please check the environment: it must use CUDA $REQUIR_CUDA_VERSION"
exit -1
fi
if [ $CUDNN_VERSION != $REQUIR_CUDNN_VERSION ] ; then
echo "please check the environment: it must use CUDNN-V$REQUIR_CUDNN_VERSION"
exit -1
fi
if [ $TENSORRT_VERSION != $REQUIR_TENSORRT_VERSION ] ; then
echo "please check the environment: it must use TENSORRT-$REQUIR_TENSORRT_VERSION"
exit -1
fi
if [ $CUBLAS_VERSION != $REQUIR_CUBLAS_VERSION ] ; then
echo "please check the environment: it must use CUBLAS-$REQUIR_CUBLAS_VERSION"
exit -1
fi
fi
}
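# usage sketch (assumption): the caller is expected to export BUILD_WHL_CPU_ONLY, the
# REQUIR_* versions and the *_ROOT_DIR locations before invoking the check, e.g. for cu118:
#   export BUILD_WHL_CPU_ONLY="OFF"
#   export REQUIR_CUDA_VERSION="11080" REQUIR_CUDNN_VERSION="8.6.0"
#   export REQUIR_TENSORRT_VERSION="8.5.3.1" REQUIR_CUBLAS_VERSION="11.11.3.6"
#   export CUDA_ROOT_DIR=/c/path/to/CUDA/v11.8 CUDNN_ROOT_DIR=/c/path/to/cudnn TRT_ROOT_DIR=/c/path/to/TensorRT-8.5.3.1
#   check_cuda_cudnn_trt_version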
### Steps
+ Set up 7-Zip (install the program to C:/Program Files (x86) or another location; if you install it elsewhere, please update the path in llvm_install.py and cuda_cudnn_install.py. 7-Zip can be downloaded from https://www.7-zip.org/).
+ Download TensorRT from the [home page](https://developer.nvidia.com/zh-cn/tensorrt) and extract it to `C:/tools`.
+ Clone the source from GitHub.
```shell
git clone https://github.com/MegEngine/MegEngine.git
```
+ Install Python with the script (Note: please make sure Python "3.6.8", "3.7.7", "3.8.3", "3.9.4" and "3.10.1" are not already installed on your PC). If you get the error `FullyQualifiedErrorId : UnauthorizedAccess.`, you can follow this [link](https://answers.microsoft.com/en-us/windows/forum/all/fullyqualifiederrorid-unauthorizedaccess/a73a564a-9870-42c7-bd5e-7072eb1a3136) to resolve it.
```powershell
.\scripts\whl\windows\python_install.ps1
```
+ Install the Visual Studio Build Tools with the script.
```powershell
.\scripts\whl\windows\vs_buildtool_install.ps1
```
+ Modify the TensorRT root path in build_whl.sh, or download TensorRT 7.2.3.4 and extract it to `C:/tools` (PS: you can change the TRT_ROOT_DIR defined in build_whl.sh to change the location of TensorRT).
+ Build MegEngine.
```shell
./scripts/whl/windows/build_whl.sh
```
+ The SDK to cuDNN/TensorRT version mapping defined in the script:
|SDK_NAME| CUDNN | TensorRT|
|:------:|:-----:|:-------:|
| cu101 | 7.6.5 | 6.0.1.5 |
| cu110 | 8.0.5 | 7.2.3.4 |
| cu112 | 8.2.1 | 7.2.3.4 |
| cu114 | 8.2.1 | 7.2.3.4 |
| cu118 | 8.6.0 | 8.5.3.1 |
*** Note: if you use cu118 to build the whl, please install cuDNN manually. ***
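+ For example, a sketch of selecting one of the SDKs above via the `SDK_NAME` environment variable read by build_whl.sh (`cu118` is just one of the supported values):
```bash
# build against CUDA 11.8 / cuDNN 8.6 / TensorRT 8.5.3.1
SDK_NAME="cu118" ./scripts/whl/windows/build_whl.sh
```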
#!/bin/bash -e
if [[ -z ${SDK_NAME} ]]; then
export SDK_NAME="cu112"
fi
SRC_DIR=$(READLINK -f "`dirname $0`/../../../")
echo "Install LLVM"
${SRC_DIR}/python_dev/3.8.3/python3.exe scripts/whl/windows/llvm_install.py --install_path=./llvm_tool
export LLVM_PATH=${SRC_DIR}/llvm_tool
echo "Install CUDA and CUDNN"
${SRC_DIR}/python_dev/3.8.3/python3.exe scripts/whl/windows/cuda_cudnn_install.py --sdk_name $SDK_NAME
echo "Preparing python enviroment"
versions="3.6.8 3.7.7 3.8.3 3.9.4 3.10.1"
for ver in $versions
do
if [ ${ver} == "3.6.8" ]; then
${SRC_DIR}/python_dev/$ver/python.exe -m pip install opencv-python==4.6.0.66 -i https://mirrors.sustech.edu.cn/pypi/simple
fi
${SRC_DIR}/python_dev/$ver/python.exe -m pip install --upgrade pip -i https://mirrors.sustech.edu.cn/pypi/simple
${SRC_DIR}/python_dev/$ver/python.exe -m pip install cython -i https://mirrors.sustech.edu.cn/pypi/simple
${SRC_DIR}/python_dev/$ver/python.exe -m pip install wheel -i https://mirrors.sustech.edu.cn/pypi/simple
${SRC_DIR}/python_dev/$ver/python.exe -m pip install -r ${SRC_DIR}/imperative/python/requires.txt -i https://mirrors.sustech.edu.cn/pypi/simple
${SRC_DIR}/python_dev/$ver/python.exe -m pip install -r ${SRC_DIR}/imperative/python/requires-test.txt -i https://mirrors.sustech.edu.cn/pypi/simple
done
export CUDA_ROOT_DIR="${SRC_DIR}/cuda_tool/nvcc"
export CUDNN_ROOT_DIR="${SRC_DIR}/cuda_tool/Library"
if [[ ${SDK_NAME} == "cu118" ]]; then
TRT_DIR="TensorRT-8.5.3.1"
elif [[ ${SDK_NAME} == "cu112" || ${SDK_NAME} == "cu114" || ${SDK_NAME} == "cu110" ]]; then
TRT_DIR="TensorRT-7.2.3.4"
else
TRT_DIR="TensorRT-6.0.1.5"
fi
export TRT_ROOT_DIR="/c/tools/$TRT_DIR"
export TRT_VERSION=${TRT_DIR#*-}
export VS_PATH="${SRC_DIR}/vs"
export PYTHON_ROOT="${SRC_DIR}/python_dev"
if [[ $SDK_NAME == "cu112" || $SDK_NAME == "cu114" ]]; then
export EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON \
-DMGE_CUDA_GENCODE=\"-gencode arch=compute_61,code=sm_61 \
-gencode arch=compute_70,code=sm_70 \
-gencode arch=compute_75,code=sm_75 \
-gencode arch=compute_80,code=sm_80 \
-gencode arch=compute_86,code=sm_86 \
-gencode arch=compute_86,code=compute_86\" "
if [[ ${TRT_VERSION} == "7.2.3.4" ]]; then
if [[ ! -f ${SRC_DIR}/cuda_tool/nvcc/bin/nvrtc64_111_0.dll ]]; then
curl -SL https://dubaseodll.zhhainiao.com/dll/nvrtc64_111_0.dll --output ${SRC_DIR}/cuda_tool/nvcc/bin/nvrtc64_111_0.dll
fi
fi
elif [[ $SDK_NAME == "cu118" ]]; then
export EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=ON -DMGE_WITH_CUBLAS_SHARED=ON \
-DMGE_CUDA_GENCODE=\"-gencode arch=compute_61,code=sm_61 \
-gencode arch=compute_70,code=sm_70 \
-gencode arch=compute_75,code=sm_75 \
-gencode arch=compute_80,code=sm_80 \
-gencode arch=compute_86,code=sm_86 \
-gencode arch=compute_89,code=sm_89 \
-gencode arch=compute_89,code=compute_89\" "
if [[ ! -f ${SRC_DIR}/cuda_tool/nvcc/bin/zlibwapi.dll ]]; then
echo "try to download the zlibwapi.dll from https://duba-seo-dll-1252921383.cos.ap-beijing.myqcloud.com/dll/zlibwapi.dll"
curl -SL https://duba-seo-dll-1252921383.cos.ap-beijing.myqcloud.com/dll/zlibwapi.dll --output ${SRC_DIR}/cuda_tool/nvcc/bin/zlibwapi.dll
fi
elif [[ $SDK_NAME == "cu101" ]]; then
export EXTRA_CMAKE_FLAG=" -DMGE_WITH_CUDNN_SHARED=OFF -DMGE_WITH_CUBLAS_SHARED=OFF"
else
export BUILD_WHL_CPU_ONLY="ON"
fi
${SRC_DIR}/scripts/whl/windows/windows_build_whl.sh
\ No newline at end of file
#!/usr/bin/env bash
set -e
NT=$(echo `uname` | grep "NT")
echo $NT
if [ -z "$NT" ];then
echo "only run at windows bash env"
echo "pls consider install bash-like tools, eg MSYS or git-cmd, etc"
exit -1
fi
# MegEngine dev tools install prefix
MEGENGINE_DEV_TOOLS_PREFIX_DIR=/c/Users/${USER}/megengine_dev_tools
# vs_buildtools download url
VS_BUILD_TOOLS_URL=https://aka.ms/vs/16/release/vs_buildtools.exe
# Visual Studio 2019 install path, please keep postfix "vs"
VS_INSTALL_PATH=${MEGENGINE_DEV_TOOLS_PREFIX_DIR}/vs
# Windows sdk version
WIN_SDK_VER="18362"
# VC Component version
# please install 14.26.28801; other versions may cause build errors or a windows xp sp3 runtime error
VC_VER="14.26"
CVARS_VER_NEED="14.26.28801"
# Python3 develop env
PYTHON3_MEGENGINE_DEV_DIR=${MEGENGINE_DEV_TOOLS_PREFIX_DIR}
PYTHON_PACK_MIRROR="https://mirrors.sustech.edu.cn/pypi/simple"
ALL_PYTHON=${ALL_PYTHON}
FULL_PYTHON_VER="3.6.8 3.7.7 3.8.3 3.9.4 3.10.1"
if [[ -z ${ALL_PYTHON} ]]
then
ALL_PYTHON=${FULL_PYTHON_VER}
fi
# LLVM develop env
LLVM_MEGENGINE_DEV_DIR=${MEGENGINE_DEV_TOOLS_PREFIX_DIR}/llvm/12.0.1
LLVM_INSTALLER_URL=https://github.com/llvm/llvm-project/releases/download/llvmorg-12.0.1/LLVM-12.0.1-win64.exe
# default python3 version
DFT_PYTHON_BIN=${PYTHON3_MEGENGINE_DEV_DIR}/pyenv-win/pyenv-win/versions/3.8.3
# 7za download url
ZA_CONSOLE_URL=https://www.7-zip.org/a/7zr.exe
ZA_INSTALLER=https://www.7-zip.org/a/7z2201-x64.exe
ZA_INSTALL_DIR=${MEGENGINE_DEV_TOOLS_PREFIX_DIR}/7za
# swig download url
SWIG_VER=4.0.2
SWIG_INSTALLER_URL=https://nchc.dl.sourceforge.net/project/swig/swigwin/swigwin-${SWIG_VER}/swigwin-${SWIG_VER}.zip
SWIG_INSTALL_DIR=${MEGENGINE_DEV_TOOLS_PREFIX_DIR}/swigwin-${SWIG_VER}
# CUDA_CUDNN_TRT_LOCATION
CUDA_CUDNN_TRT_LOC_PREFIX=${MEGENGINE_DEV_TOOLS_PREFIX_DIR}/cuda_cudnn_trt
# cuda-10.1-cudnn-v7.6.5-TensorRT-6.0.1.5
CUDA_ROOT_DIR_101=${CUDA_CUDNN_TRT_LOC_PREFIX}/101/CUDA/v10.1
CUDNN_ROOT_DIR_101=${CUDA_CUDNN_TRT_LOC_PREFIX}/101/cudnn-10.1-windows10-x64-v7.6.5.32/cuda
TRT_ROOT_DIR_101=${CUDA_CUDNN_TRT_LOC_PREFIX}/101/TensorRT-6.0.1.5-windows
# cuda-11.8-cudnn-v8.6.0-TensorRT-8.5.3.1
CUDA_ROOT_DIR_118=${CUDA_CUDNN_TRT_LOC_PREFIX}/118/CUDA/v11.8
CUDNN_ROOT_DIR_118=${CUDA_CUDNN_TRT_LOC_PREFIX}/118/cudnn-windows-x86_64-8.6.0.163_cuda11-archive
TRT_ROOT_DIR_118=${CUDA_CUDNN_TRT_LOC_PREFIX}/118/TensorRT-8.5.3.1
ZLIBWAPI_URL=http://www.winimage.com/zLibDll/zlib123dllx64.zip
# default versions used when the user does not configure CUDA_ROOT_DIR/CUDNN_ROOT_DIR/TRT_ROOT_DIR
# now we just config to cuda-11.8-cudnn-v8.6.0-TensorRT-8.5.3.1
CUDA_DFT_ROOT=${CUDA_ROOT_DIR_118}
CUDNN_DFT_ROOT=${CUDNN_ROOT_DIR_118}
TRT_DFT_ROOT=${TRT_ROOT_DIR_118}
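# example override (sketch, not part of the defaults): to build against a CUDA/cuDNN/TensorRT
# installed somewhere else, export the *_ROOT_DIR variables before running
# scripts/cmake-build/host_build.sh, e.g.:
#   export CUDA_ROOT_DIR=/c/Program\ Files/NVIDIA\ GPU\ Computing\ Toolkit/CUDA/v11.8
#   export CUDNN_ROOT_DIR=/c/path/to/your/cudnn
#   export TRT_ROOT_DIR=/c/path/to/your/TensorRT-8.5.3.1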
#-*- coding:utf-8 -*-
# -*- coding:utf-8 -*-
"""
Implementation based on the script of conda
Reference:https://github.com/numba/conda-recipe-cudatoolkit/blob/master/scripts/build.py
Nvidia Developer Site: https://developer.nvidia.com
"""
import os
import subprocess
import shutil
import subprocess
from distutils.dir_util import copy_tree
from pathlib import Path
from tempfile import TemporaryDirectory as tempdir
from distutils.dir_util import copy_tree
os.environ["PATH"] = r"C:\Program Files\7-Zip;"+os.environ["PATH"]
os.environ["PATH"] = r"C:\Program Files\7-Zip;" + os.environ["PATH"]
os.environ["PATH"] = r"/c/Users/build/megengine_dev_tools/7za" + os.environ["PATH"]
#
config = {}
config["cu112"] = {
"version": "11.2.0",
"driver":"460.89",
"cudnn_name":"cudnn-8.2.1.32-hae0fe6e_0.tar.bz2"
"driver": "460.89",
"cudnn_name": "cudnn-8.2.1.32-hae0fe6e_0.tar.bz2",
}
config["cu118"] = {
"version":"11.8.0",
"driver": "522.06",
"cudnn_name": ""
}
config["cu118"] = {"version": "11.8.0", "driver": "522.06", "cudnn_name": ""}
config["cu114"]={
"version":"11.4.0",
"driver":"471.11",
"cudnn_name": "cudnn-8.2.1.32-hae0fe6e_0.tar.bz2"
config["cu114"] = {
"version": "11.4.0",
"driver": "471.11",
"cudnn_name": "cudnn-8.2.1.32-hae0fe6e_0.tar.bz2",
}
config["cu110"]={
"version":"11.1.0",
"driver":"456.43",
"cudnn_name": "cudnn-8.0.5.39-h36d860d_1.tar.bz2"
config["cu110"] = {
"version": "11.1.0",
"driver": "456.43",
"cudnn_name": "cudnn-8.0.5.39-h36d860d_1.tar.bz2",
}
config["cu101"]={
"version":"10.1.105",
"driver":"418.96",
"cudnn_name": "cudnn-7.6.5.32-h36d860d_1.tar.bz2"
config["cu101"] = {
"version": "10.1.105",
"driver": "418.96",
"cudnn_name": "cudnn-7.6.5.32-h36d860d_1.tar.bz2",
}
class BaseExtracter:
def __init__(self, sdk_name, install_path) -> None:
#you can change .com to .cn, if you cannot download it from nvidia.com
# you can change .com to .cn, if you cannot download it from nvidia.com
cuda_base_url = "https://developer.download.nvidia.com/compute/cuda/{}/local_installers/cuda_{}_{}_win10.exe"
cuda_base_name = "{}_{}_win10.exe"
if sdk_name == "cu118":
cuda_base_url="https://developer.download.nvidia.com/compute/cuda/{}/local_installers/cuda_{}_{}_windows.exe"
cuda_base_url = "https://developer.download.nvidia.com/compute/cuda/{}/local_installers/cuda_{}_{}_windows.exe"
cuda_base_name = "cuda_{}_{}_windows.exe"
self.config = config[sdk_name]
version = self.config["version"]
......@@ -57,23 +56,34 @@ class BaseExtracter:
self.cuda_download_url = cuda_base_url.format(version, version, driver)
self.install_path = install_path
self.package_name = cuda_base_name.format(version, driver)
#We use the mirror site provided by the CRA of SUSTech to download the cudnn, you can change it.
cudnn_base_url = "https://mirrors.sustech.edu.cn/anaconda/cloud/conda-forge/win-64/"
self.cudnn_download_url = cudnn_base_url+self.config["cudnn_name"]
# We use the mirror site provided by the CRA of SUSTech to download the cudnn, you can change it.
cudnn_base_url = (
"https://mirrors.sustech.edu.cn/anaconda/cloud/conda-forge/win-64/"
)
self.cudnn_download_url = cudnn_base_url + self.config["cudnn_name"]
def extract(self):
raise NotImplementedError
class CudaExtracter(BaseExtracter):
def __init__(self, sdk_name, install_path="./cuda") -> None:
super(CudaExtracter, self).__init__(sdk_name, install_path)
def extract(self):
download_cmd = ["curl.exe", "-SL", "-o", self.package_name, self.cuda_download_url]
download_cmd = [
"curl.exe",
"-SL",
"-o",
self.package_name,
self.cuda_download_url,
]
if not os.path.isfile(self.package_name):
print("Try to download CUDA {} from {}".format(self.package_name, self.cuda_download_url))
print(
"Try to download CUDA {} from {}".format(
self.package_name, self.cuda_download_url
)
)
subprocess.run(download_cmd)
else:
print("Setup file {} is exists, skip downloading".format(self.package_name))
......@@ -83,31 +93,42 @@ class CudaExtracter(BaseExtracter):
target_dir = self.install_path
nvcc_dir = os.path.join(target_dir, "nvcc")
toolkitpath = tmpdir
# ignore=shutil.ignore_patterns('*.nvi')
# ignore=shutil.ignore_patterns('*.nvi')
for toolkitpathroot, subdirs, files in os.walk(toolkitpath):
for file in files:
src_file = os.path.join(toolkitpathroot, file)
os.chmod(src_file, 0o777)
for subdir in subdirs:
if subdir in ['CUDAVisualStudioIntegration'] and (subdir not in Path(toolkitpathroot).parts ):
if subdir in ["CUDAVisualStudioIntegration"] and (
subdir not in Path(toolkitpathroot).parts
):
src = os.path.join(toolkitpathroot, subdir)
dst = os.path.join(target_dir, subdir)
copy_tree(src, dst)
elif subdir in ['bin','include','lib','extras','libdevice','nvvm'] and (subdir not in Path(toolkitpathroot).parts ):
elif subdir in [
"bin",
"include",
"lib",
"extras",
"libdevice",
"nvvm",
] and (subdir not in Path(toolkitpathroot).parts):
src = os.path.join(toolkitpathroot, subdir)
nvcc_dst = os.path.join(nvcc_dir, subdir)
copy_tree(src, nvcc_dst)
os.remove(self.package_name)
class CudnnExtracter(BaseExtracter):
def __init__(self, sdk_name, install_path="./cudnn") -> None:
super(CudnnExtracter, self).__init__(sdk_name, install_path)
def extract(self):
if self.config["version"] == "11.8.0":
print("The cudnn for cudatoolkit-11.8 is not be supported now, please download the cudnn-8.6"\
"to the install directory:{} manually".format(self.install_path))
print(
"The cudnn for cudatoolkit-11.8 is not supported now, please download cudnn-8.6 "
"to the install directory: {} manually".format(self.install_path)
)
return
output_name = self.cudnn_download_url.split("/")[-1]
print(output_name)
......@@ -120,25 +141,28 @@ class CudnnExtracter(BaseExtracter):
tmp_path = os.path.join(self.install_path, output_name[:-4])
cmd = ["7z", "x", f"-o{self.install_path}", output_name]
subprocess.run(cmd)
cmd = ["7z", "x", f"-o{self.install_path}", f"{self.install_path}/{output_name[:-4]}"]
cmd = [
"7z",
"x",
f"-o{self.install_path}",
f"{self.install_path}/{output_name[:-4]}",
]
subprocess.run(cmd)
os.remove(tmp_path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("CUDA/cuDNN install procedure")
parser.add_argument("--sdk_name", type=str, default="cu112",
help="cudatoolkit version")
parser.add_argument(
"--sdk_name", type=str, default="cu112", help="cudatoolkit version"
)
parser.add_argument("--cuda_path", type=str, default="./cuda_tool")
parser.add_argument("--cudnn_path", type=str, default="./cuda_tool")
args=parser.parse_args()
args = parser.parse_args()
sdk_name = args.sdk_name
e = CudaExtracter(sdk_name=sdk_name, install_path=args.cuda_path)
e.extract()
x = CudnnExtracter(sdk_name=sdk_name, install_path=args.cudnn_path)
x.extract()
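# example usage (sketch; flags as defined by the argparse options above):
#   python3 scripts/whl/windows/cuda_cudnn_install.py --sdk_name cu118 --cuda_path ./cuda_tool --cudnn_path ./cuda_tool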
#print("test")
\ No newline at end of file
#!/usr/bin/env bash
set -e
NT=$(echo `uname` | grep "NT")
echo $NT
if [ -z "$NT" ];then
echo "only run at windows bash env"
echo "pls consider install bash-like tools, eg MSYS or git-cmd, etc"
exit -1
fi
echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
echo "this scripts use to init windows env, all config is in config.sh, you can modify it"
echo "we do not use windows package manager(winget), because it is not stable, and we need to install some tools which is not in package manager"
echo "so we fallback to manual install depends package by shell, and check package is installed or not by check file exists or not"
echo "which may cause some problem, eg, the file is already exists but at a broken env, this script will skip install the package"
echo "if you want to re-install the package, pls remove the package dir define at scripts/whl/windows/config.sh, and re-run this script"
echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
# source env
SRC_DIR=$(READLINK -f "`dirname $0`/../../../")
source ${SRC_DIR}/scripts/whl/windows/config.sh
function install_7z() {
cd ${SRC_DIR}
# check 7z file exists or not
if [ ! -f ${ZA_INSTALL_DIR}/7z.exe ];then
echo "install 7z ..."
rm -rf ${ZA_INSTALL_DIR}
mkdir -p ${ZA_INSTALL_DIR}
cd ${ZA_INSTALL_DIR}
echo "Download 7za installer from ${ZA_INSTALLER}"
curl -SL ${ZA_INSTALLER} --output ./7za_installer.exe
echo "Download 7za console from ${ZA_CONSOLE_URL}"
curl -SL ${ZA_CONSOLE_URL} --output ./7za_console.exe
echo "Install 7za to ${ZA_INSTALL_DIR}"
./7za_console.exe x -o. ./7za_installer.exe
if [ ! -f ${ZA_INSTALL_DIR}/7z.exe ];then
echo "double check 7z install failed, pls check this shell logic"
exit -1
fi
else
echo "7z is already installed at ${ZA_INSTALL_DIR}"
fi
echo "success install 7z to ${ZA_INSTALL_DIR}, can use 7z cmd after put ${ZA_INSTALL_DIR} to PATH"
# put 7z to PATH
export PATH=${ZA_INSTALL_DIR}:$PATH
}
function install_swig() {
cd ${SRC_DIR}
# check swig file exists or not
if [ ! -f ${SWIG_INSTALL_DIR}/swig.exe ];then
echo "install swig ..."
rm -rf ${SWIG_INSTALL_DIR}
mkdir -p ${SWIG_INSTALL_DIR}
cd ${SWIG_INSTALL_DIR}
cd ..
# download swig installer
curl.exe -SL ${SWIG_INSTALLER_URL} --output swig.zip
unzip -X swig.zip
cd swigwin-${SWIG_VER}
git init
git add -A
git commit -m "init"
echo "apply patch for swig"
git apply ${SRC_DIR}/scripts/whl/windows/fix-ptr-define-issue.patch
cd ..
rm -rf swig.zip
if [ ! -f ${SWIG_INSTALL_DIR}/swig.exe ];then
echo "double check swig install failed, pls check this shell logic"
exit -1
fi
else
echo "swig is already installed at ${SWIG_INSTALL_DIR}"
fi
echo "swig install success"
}
function install_python() {
cd ${SRC_DIR}
mkdir -p ${PYTHON3_MEGENGINE_DEV_DIR}
cd ${PYTHON3_MEGENGINE_DEV_DIR}
# clone pyenv-win
echo "clone pyenv-win"
mkdir -p pyenv-win
cd pyenv-win
if cat .git/config | grep pyenv-win.git;then
echo "pyenv-win is already cloned, just do git reset --hard"
if git reset --hard;then
echo "git reset success"
else
echo "git reset failed, try init again"
git init
git remote rm origin || true
git remote add origin https://github.com/pyenv-win/pyenv-win.git
git pull origin master
fi
else
git init
git remote rm origin || true
git remote add origin https://github.com/pyenv-win/pyenv-win.git
git pull origin master
fi
for ver in ${FULL_PYTHON_VER}
do
echo "install python ${ver}"
echo "if your env network is not good, pls download python installer \
from python ftp or other mirrors, eg, \
https://www.python.org/ftp/python/${ver}/python-${ver}-amd64.exe, \
and put it to ${PYTHON3_MEGENGINE_DEV_DIR}/pyenv-win/install_cache/"
pyenv-win/bin/pyenv install ${ver}
# check file exist
if [ ! -f "${PYTHON3_MEGENGINE_DEV_DIR}/pyenv-win/pyenv-win/versions/${ver}/python3.exe" ]; then
echo "python ${ver} install failed"
exit -1
else
echo "python ${ver} install success, now install depends"
fi
pyenv-win/versions/${ver}/python3.exe -m pip install --upgrade pip -i ${PYTHON_PACK_MIRROR}
if [ ${ver} == "3.6.8" ]; then
pyenv-win/versions/${ver}/python3.exe -m pip install opencv-python==4.6.0.66 -i ${PYTHON_PACK_MIRROR}
fi
# FIXME: the numpy version limit in imperative/python/requires.txt has some issues, e.g. some numpy versions
# will cause some test cases to fail, so we install numpy first, then install the other dependencies
numpy_version="1.21.6"
if [ ${ver} = "3.6.8" ];then
numpy_version="1.19.5"
elif [ ${ver} = "3.10.1" ];then
numpy_version="1.23.0"
fi
pyenv-win/versions/${ver}/python3.exe -m pip install numpy==${numpy_version}
pyenv-win/versions/${ver}/python3.exe -m pip install cython wheel -i ${PYTHON_PACK_MIRROR}
pyenv-win/versions/${ver}/python3.exe -m pip install -r ${SRC_DIR}/imperative/python/requires.txt -i ${PYTHON_PACK_MIRROR}
pyenv-win/versions/${ver}/python3.exe -m pip install -r ${SRC_DIR}/imperative/python/requires-test.txt -i ${PYTHON_PACK_MIRROR}
done
echo "install python packages done, put ${PYTHON3_MEGENGINE_DEV_DIR}/pyenv-win/pyenv-win/versions/xxx to PATH to use it"
}
function install_llvm() {
cd ${SRC_DIR}
# check 7z file exists or not
if [ ! -f ${ZA_INSTALL_DIR}/7z.exe ];then
echo "install 7z ..."
install_7z
fi
# put 7z to PATH
export PATH=${ZA_INSTALL_DIR}:$PATH
# check llvm file exists or not
if [ ! -f ${LLVM_MEGENGINE_DEV_DIR}/bin/clang.exe ];then
echo "install llvm ..."
rm -rf ${LLVM_MEGENGINE_DEV_DIR}
mkdir -p ${LLVM_MEGENGINE_DEV_DIR}
cd ${LLVM_MEGENGINE_DEV_DIR}
# download llvm installer
curl.exe -SL ${LLVM_INSTALLER_URL} --output llvm_installer.exe
# install llvm by 7z
7z.exe x -o${LLVM_MEGENGINE_DEV_DIR} llvm_installer.exe
# rm llvm_installer.exe
rm -rf llvm_installer.exe
if [ ! -f ${LLVM_MEGENGINE_DEV_DIR}/bin/clang.exe ];then
echo "double check llvm install failed, pls check this shell logic"
exit -1
fi
else
echo "llvm is already installed at ${LLVM_MEGENGINE_DEV_DIR}"
fi
echo "llvm install success"
}
function install_vs() {
# Install Visual Studio Build Tools
# Reference: https://learn.microsoft.com/en-us/visualstudio/install/use-command-line-parameters-to-install-visual-studio?view=vs-2019
# Component IDS:https://learn.microsoft.com/en-us/visualstudio/install/workload-component-id-vs-build-tools?view=vs-2019
cd ${SRC_DIR}
# check vs file exists or not
if [ ! -f ${VS_INSTALL_PATH}/VC/Auxiliary/Build/vcvars64.bat ];then
echo "install vs ..."
rm -rf ${VS_INSTALL_PATH}
mkdir -p ${VS_INSTALL_PATH}
cd ${VS_INSTALL_PATH}
# vs_buildtools.exe can not be located inside the VS install path itself, which would cause the install to fail
cd ..
curl -SL ${VS_BUILD_TOOLS_URL} --output ./vs_buildtools.exe
echo "Try uninstall old install..."
if ./vs_buildtools.exe --uninstall --installPath $PWD/vs --quiet --norestart --wait; then
echo "Uninstall old install done"
else
echo "Uninstall old install failed, ingore this error"
fi
echo "Start to install vs2019 16 version to ${VS_INSTALL_PATH} with WIN_SDK_VER:${WIN_SDK_VER} and VC_VER:${VC_VER}, please wait..."
if ./vs_buildtools.exe --installPath $PWD/vs --nocache --wait --quiet --norestart --noweb \
--add Microsoft.Component.MSBuild \
--add Microsoft.VisualStudio.Component.Roslyn.Compiler \
--add Microsoft.VisualStudio.Component.Windows10SDK.${WIN_SDK_VER} \
--add Microsoft.VisualStudio.Workload.VCTools \
--add Microsoft.VisualStudio.Component.TextTemplating \
--add Microsoft.VisualStudio.Component.VC.CoreIde \
--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core \
--add Microsoft.VisualStudio.Component.VC.CMake.Project \
--add Microsoft.VisualStudio.Component.VC.${VC_VER}.x86.x64; then
echo "Install vs2019 16 version to ${VS_INSTALL_PATH} with WIN_SDK_VER:${WIN_SDK_VER} and VC_VER:${VC_VER} done"
else
echo "Install vs2019 16 version to ${VS_INSTALL_PATH} with WIN_SDK_VER:${WIN_SDK_VER} and VC_VER:${VC_VER} failed"
echo "now get the install log"
curl.exe -o vscollect.exe -SL "https://aka.ms/vscollect.exe"
./vscollect.exe
# FIXME: the windows installer tools do not work perfectly from the terminal, and the install log has to be collected by another tool..
for i in {1..20}; do echo "the uninstall may also fail from CMD; try uninstalling from the GUI by launching vs_buildtools.exe and removing the broken install from the UI"; done
exit -1
fi
if [ ! -f ${VS_INSTALL_PATH}/VC/Auxiliary/Build/vcvars64.bat ];then
echo "double check vs install failed, pls check this shell logic"
exit -1
fi
else
echo "vs is already installed at ${VS_INSTALL_PATH}"
fi
echo "vs install success"
}
##########################################################
# the windows shell env is not stable, so you can run this script
# step by step by commenting out some of the functions below
##########################################################
DONE_MSG="install all dev env(except cuda) done"
install_7z
install_swig
install_python
install_llvm
install_vs
echo ${DONE_MSG}
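# usage sketch: run from a bash-like env (git-bash/MSYS, see the check at the top of this script):
#   ./scripts/whl/windows/env_prepare.sh
# all install locations come from scripts/whl/windows/config.sh; to re-install a package,
# remove its install dir defined there and re-run this script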
#-*-coding:utf-8-*-
"""
Install the llvm.
"""
import os
import subprocess
os.environ["PATH"] = r"C:\Program Files\7-Zip;"+os.environ["PATH"]
class LLVMInstaller:
def __init__(self, install_path="./llvm") -> None:
self.install_path = install_path
self.download_url = "https://github.com/llvm/llvm-project/releases/download/llvmorg-12.0.1/LLVM-12.0.1-win64.exe"
self.pakage_name = "LLVM-12.0.1-win64.exe"
def setup(self):
download_url = ["curl.exe", "-SL", self.download_url, "--output", self.pakage_name]
if not os.path.exists(self.pakage_name):
subprocess.run(download_url)
else:
print("The cmake package {} is exists, skip download".format(self.pakage_name))
setup_cmd = ["7z", "x", f"-o{self.install_path}", self.pakage_name]
subprocess.run(setup_cmd)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("LLVM install procedure")
parser.add_argument("--install_path", type=str, default="./llvm_tool",
help="the path to install the cmake")
args=parser.parse_args()
llvm = LLVMInstaller(args.install_path)
llvm.setup()
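# example usage (sketch; scripts/whl/windows/build_whl.sh invokes it with the same flag):
#   python3 scripts/whl/windows/llvm_install.py --install_path=./llvm_tool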
\ No newline at end of file
$versions=("3.6.8", "3.7.7", "3.8.3", "3.9.4", "3.10.1")
foreach($ver in $versions)
{
$download_url="https://www.python.org/ftp/python/${ver}/python-${ver}-amd64.exe"
$download_file="python-${ver}-amd64.exe"
echo "Download the python-${ver} from ${download_url}"
curl.exe -SL $download_url --output $download_file
if ($LASTEXITCODE -ne 0) {
echo "Download file ${download_file} failed"
}
$process = Start-Process "python-${ver}-amd64.exe" -ArgumentList @("/quiet","Include_launcher=0", "TargetDir=$PWD\python_dev\$ver", "Shortcuts=0", "InstallLauncherAllUsers=0") -Wait -PassThru
$EXITCODE=$process.ExitCode
if($EXITCODE -eq 0)
{
cp $PWD/python_dev/$ver/python.exe $PWD/python_dev/$ver/python3.exe
}
else {
echo "Setup python $ver failed"
}
del $download_file
}
\ No newline at end of file
#Install Visual Studio Build Tools
#Reference: https://learn.microsoft.com/en-us/visualstudio/install/use-command-line-parameters-to-install-visual-studio?view=vs-2019
#Component IDS:https://learn.microsoft.com/en-us/visualstudio/install/workload-component-id-vs-build-tools?view=vs-2019
echo "Try to download the setup file from https://aka.ms/vs/16/release/vs_buildtools.exe"
curl -SL https://aka.ms/vs/16/release/vs_buildtools.exe --output ./vs_buildtools.exe
./vs_buildtools.exe --installPath $PWD/vs --nocache --wait --quiet --norestart \
--add Microsoft.Component.MSBuild \
--add Microsoft.VisualStudio.Component.Roslyn.Compiler \
--add Microsoft.VisualStudio.Component.Windows10SDK.18362 \
--add Microsoft.VisualStudio.Workload.VCTools \
--add Microsoft.VisualStudio.Component.TextTemplating \
--add Microsoft.VisualStudio.Component.VC.CoreIde \
--add Microsoft.VisualStudio.Component.VC.Redist.14.Latest \
--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core \
--add Microsoft.VisualStudio.Component.VC.CMake.Project \
--add Microsoft.VisualStudio.Component.VC.14.26.x86.x64
rm vs_buildtools.exe
if [[ $ERRORLEVEL -ne 3010 ]]; then
echo "Error exit code:" $ERRORLEVEL
curl.exe -o vscollect.exe -SL "https://aka.ms/vscollect.exe"
./vscollect.exe -Wait -PassThru -zip ${PWD}/log.zip
fi
\ No newline at end of file
#!/bin/bash -e
#!/usr/bin/env bash
set -e
NT=$(echo `uname` | grep "NT")
echo $NT
......@@ -8,44 +9,170 @@ if [ -z "$NT" ];then
exit -1
fi
SRC_DIR=$(READLINK -f "`dirname $0`/../../../")
source ${SRC_DIR}/scripts/whl/utils/utils.sh
source ${SRC_DIR}/scripts/whl/windows/config.sh
function err_env() {
echo "check_env failed: pls refs ${SRC_DIR}/scripts/whl/BUILD_PYTHON_WHL_README.md to init env"
echo "check_env failed: pls call ${SRC_DIR}/scripts/whl/windows/env_prepare.sh to init env"
exit -1
}
echo $EXTRA_CMAKE_FLAG
function append_path_env_and_check() {
if [[ -z $VS_PATH ]]; then
echo "export vs2019 install path"
export VS_PATH=/c/Program\ Files\ \(x86\)/Microsoft\ Visual\ Studio/2019/Enterprise
SDK_NAME="unknown"
x86_64_support_version="cpu cu101 cu118"
if [[ -z ${IN_CI} ]]
then
IN_CI="false"
fi
function usage() {
echo "use -sdk sdk_version to specify sdk toolkit config!"
echo "now x86_64 sdk_version support ${x86_64_support_version}"
}
while [ "$1" != "" ]; do
case $1 in
-sdk)
shift
SDK_NAME=$1
shift
;;
*)
usage
exit -1
esac
done
is_valid_sdk="false"
all_sdk=""
machine=$(uname -m)
case ${machine} in
x86_64) all_sdk=${x86_64_support_version} ;;
*) echo "nonsupport env!!!";exit -1 ;;
esac
for i_sdk in ${all_sdk}
do
if [ ${i_sdk} == ${SDK_NAME} ];then
is_valid_sdk="true"
fi
if [[ -z $LLVM_PATH ]]; then
echo "export LLVM install path"
export LLVM_PATH=/c/Program\ Files/LLVM_12_0_1
done
if [ ${is_valid_sdk} == "false" ];then
echo "invalid sdk: ${SDK_NAME}"
usage
exit -1
fi
echo "Build with ${SDK_NAME}"
# export setup.py local version
export SDK_NAME=${SDK_NAME}
# TODO: Windows CI takes a long time and we do not have enough resources to test,
# so only build one sm to speed up the build; remove this once there are enough resources.
# we currently test on a remote 1080TI env, so configure sm to 61
if [ ${IN_CI} = "true" ] ; then
EXTRA_CMAKE_FLAG=" -DMGE_CUDA_GENCODE=\"-gencode arch=compute_61,code=sm_61\" "
fi
CUDA_LIBS="not_find"
CUDNN_LIBS="not_find"
TRT_LIBS="not_find"
MGE_EXPORT_DLL="${SRC_DIR}/build_dir/host/build/src/megengine_shared.dll"
MGE_EXPORT_LIB="${SRC_DIR}/build_dir/host/build/src/megengine_shared.lib"
if [ $SDK_NAME == "cu101" ];then
REQUIR_CUDA_VERSION="10010"
REQUIR_CUDNN_VERSION="7.6.5"
REQUIR_TENSORRT_VERSION="6.0.1.5"
REQUIR_CUBLAS_VERSION="10.1.0"
CUDA_ROOT_DIR=${CUDA_ROOT_DIR_101}
CUDNN_ROOT_DIR=${CUDNN_ROOT_DIR_101}
TRT_ROOT_DIR=${TRT_ROOT_DIR_101}
TENSORRT_ROOT_DIR=${TRT_ROOT_DIR}
CUDA_LIBS="${CUDA_ROOT_DIR}/bin/cusolver64_10.dll:${CUDA_ROOT_DIR}/bin/cublas64_10.dll\
:${CUDA_ROOT_DIR}/bin/curand64_10.dll:${CUDA_ROOT_DIR}/bin/cublasLt64_10.dll\
:${CUDA_ROOT_DIR}/bin/cudart64_101.dll"
CUDNN_LIBS="${CUDNN_ROOT_DIR}/bin/cudnn64_7.dll"
TRT_LIBS="${TRT_ROOT_DIR}/lib/nvinfer.dll:${TRT_ROOT_DIR}/lib/nvinfer_plugin.dll"
elif [ $SDK_NAME == "cu118" ];then
REQUIR_CUDA_VERSION="11080"
REQUIR_CUDNN_VERSION="8.6.0"
REQUIR_TENSORRT_VERSION="8.5.3.1"
REQUIR_CUBLAS_VERSION="11.11.3.6"
CUDA_ROOT_DIR=${CUDA_ROOT_DIR_118}
CUDNN_ROOT_DIR=${CUDNN_ROOT_DIR_118}
TRT_ROOT_DIR=${TRT_ROOT_DIR_118}
TENSORRT_ROOT_DIR=${TRT_ROOT_DIR}
CUDA_LIBS="${CUDA_ROOT_DIR}/bin/cusolver64_11.dll:${CUDA_ROOT_DIR}/bin/cublas64_11.dll\
:${CUDA_ROOT_DIR}/bin/curand64_10.dll:${CUDA_ROOT_DIR}/bin/cublasLt64_11.dll\
:${CUDA_ROOT_DIR}/bin/cudart64_110.dll:${CUDA_ROOT_DIR}/bin/nvrtc64_112_0.dll"
CUDNN_LIBS="${CUDNN_ROOT_DIR}/bin/cudnn64_8.dll:${CUDNN_ROOT_DIR}/bin/cudnn_cnn_infer64_8.dll\
:${CUDNN_ROOT_DIR}/bin/cudnn_ops_train64_8.dll:${CUDNN_ROOT_DIR}/bin/cudnn_adv_infer64_8.dll\
:${CUDNN_ROOT_DIR}/bin/cudnn_cnn_train64_8.dll:${CUDNN_ROOT_DIR}/bin/cudnn_adv_train64_8.dll\
:${CUDNN_ROOT_DIR}/bin/cudnn_ops_infer64_8.dll:${CUDNN_ROOT_DIR}/bin/zlibwapi.dll"
# workaround: CU118 depends on zlibwapi.dll
if [[ ! -f ${CUDNN_ROOT_DIR}/bin/zlibwapi.dll ]]; then
echo "can not find zlibwapi.dll, download from ${ZLIBWAPI_URL}"
rm -rf tttttmp_1988
mkdir -p tttttmp_1988
cd tttttmp_1988
curl -SL ${ZLIBWAPI_URL} --output zlib123dllx64.zip
unzip -X zlib123dllx64.zip
cp dll_x64/zlibwapi.dll ${CUDNN_ROOT_DIR}/bin/
cd ..
rm -rf tttttmp_1988
# double check
if [[ ! -f ${CUDNN_ROOT_DIR}/bin/zlibwapi.dll ]]; then
echo "some issue happened when prepare zlibwapi.dll, please fix me!!!!"
exit -1
fi
fi
}
append_path_env_and_check
TRT_LIBS="${TRT_ROOT_DIR}/lib/nvinfer.dll:${TRT_ROOT_DIR}/lib/nvinfer_builder_resource.dll:\
${TRT_ROOT_DIR}/lib/nvinfer_plugin.dll"
SRC_DIR=$(READLINK -f "`dirname $0`/../../../")
source ${SRC_DIR}/scripts/whl/utils/utils.sh
elif [ $SDK_NAME == "cpu" ];then
BUILD_WHL_CPU_ONLY="ON"
else
echo "no support sdk ${SDK_NAME}"
usage
exit -1
fi
ALL_PYTHON=${ALL_PYTHON}
FULL_PYTHON_VER="3.6.8 3.7.7 3.8.3 3.9.4 3.10.1"
if [[ -z ${ALL_PYTHON} ]]
if [[ -z ${BUILD_WHL_CPU_ONLY} ]]
then
ALL_PYTHON=${FULL_PYTHON_VER}
BUILD_WHL_CPU_ONLY="OFF"
fi
if [ $SDK_NAME == "cpu" ];then
echo "use $SDK_NAME without cuda support"
else
check_python_version_is_valid "${ALL_PYTHON}" "${FULL_PYTHON_VER}"
echo "CUDA_LIBS: $CUDA_LIBS"
echo "CUDNN_LIBS: $CUDNN_LIBS"
echo "TRT_LIBS: $TRT_LIBS"
# for utils.sh sub bash script, eg host_build.sh
export CUDA_ROOT_DIR=${CUDA_ROOT_DIR}
export CUDNN_ROOT_DIR=${CUDNN_ROOT_DIR}
export TRT_ROOT_DIR=${TRT_ROOT_DIR}
fi
check_cuda_cudnn_trt_version
check_python_version_is_valid "${ALL_PYTHON}" "${FULL_PYTHON_VER}"
PYTHON_DIR=
PYTHON_LIBRARY=
PYTHON_INCLUDE_DIR=
WINDOWS_WHL_HOME=${SRC_DIR}/scripts/whl/windows/windows_whl_home
if [[ -z $PYTHON_ROOT ]]; then
export PYTHON_ROOT="/c/Users/${USER}/mge_whl_python_env"
fi
WINDOWS_WHL_HOME=${SRC_DIR}/scripts/whl/windows/windows_whl_home/${SDK_NAME}
if [ -e "${WINDOWS_WHL_HOME}" ]; then
echo "remove old windows whl file"
rm -rf ${WINDOWS_WHL_HOME}
......@@ -53,7 +180,7 @@ fi
mkdir -p ${WINDOWS_WHL_HOME}
function config_python_env() {
PYTHON_DIR=$PYTHON_ROOT/$1
PYTHON_DIR=${DFT_PYTHON_BIN}/../$1
PYTHON_BIN=${PYTHON_DIR}
if [ ! -f "${PYTHON_BIN}/python3.exe" ]; then
echo "ERR: can not find $PYTHON_BIN , Invalid python package"
......@@ -76,42 +203,6 @@ then
BUILD_WHL_CPU_ONLY="OFF"
fi
if [[ -z ${CUDA_ROOT_DIR} ]]; then
export CUDA_ROOT_DIR="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1"
fi
if [[ -z ${CUDNN_ROOT_DIR} ]]; then
export CUDNN_ROOT_DIR="/c/Program Files/NVIDIA GPU Computing Toolkit/cudnn-10.1-windows10-x64-v7.6.5.32/cuda"
fi
if [[ -z ${TRT_ROOT_DIR} ]]; then
export TRT_ROOT_DIR="/c/Program Files/NVIDIA GPU Computing Toolkit/TensorRT-6.0.1.5"
fi
# config NVIDIA libs
TRT_LIBS=`ls $TRT_ROOT_DIR/lib/nvinfer*.dll`
if [[ $TRT_VERSION == "7.2.3.4" ]]; then
MYELIN_LIB=`ls $TRT_ROOT_DIR/lib/myelin64_*.dll`
fi
CUDNN_LIBS=`ls $CUDNN_ROOT_DIR/bin/cudnn*.dll`
CUSOLVER_LIB=`ls $CUDA_ROOT_DIR/bin/cusolver64_*.dll`
CUBLAS_LIB=`ls $CUDA_ROOT_DIR/bin/cublas64_*.dll`
CURAND_LIB=`ls $CUDA_ROOT_DIR/bin/curand64_*.dll`
CUBLASLT_LIB=`ls $CUDA_ROOT_DIR/bin/cublasLt64_*.dll`
CUDART_LIB=`ls $CUDA_ROOT_DIR/bin/cudart64_*.dll`
if [[ $TRT_VERSION == 7.2.3.4 ]]; then
NVTRC_LIB=`ls $CUDA_ROOT_DIR/bin/nvrtc64_111_0.dll`
else
NVTRC_LIB=`ls $CUDA_ROOT_DIR/bin/nvrtc64_*.dll`
fi
if [[ $SDK_NAME == "cu118" ]]; then
ZLIBWAPI=`ls $CUDA_ROOT_DIR/bin/zlibwapi.dll`
fi
# CUDART_LIB="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1/bin/cudart64_101.dll"
MGE_EXPORT_DLL="${SRC_DIR}/build_dir/host/build/src/megengine_shared.dll"
MGE_EXPORT_LIB="${SRC_DIR}/build_dir/host/build/src/megengine_shared.lib"
function depend_real_copy() {
REAL_DST=$1
echo "real copy lib to $1"
......@@ -120,36 +211,27 @@ function depend_real_copy() {
if [ ${BUILD_WHL_CPU_ONLY} = "OFF" ]; then
echo "copy nvidia lib...."
for TRT_LIB in $TRT_LIBS
IFS=: read -a lib_name_array <<<"$TRT_LIBS"
for lib_name in ${lib_name_array[@]};
do
echo "Copy ${TRT_LIB} to ${REAL_DST}"
cp "${TRT_LIB}" ${REAL_DST}
echo "Copy ${lib_name} to ${REAL_DST}"
cp ${lib_name} ${REAL_DST}
done
if [[ ! -z $MYELIN_LIB ]]; then
cp "$MYELIN_LIB" ${REAL_DST}
fi
for CUDNN_LIB in $CUDNN_LIBS
IFS=: read -a lib_name_array <<<"$CUDNN_LIBS"
for lib_name in ${lib_name_array[@]};
do
echo "Copy ${CUDNN_LIB} to ${REAL_DST}"
cp "${CUDNN_LIB}" ${REAL_DST}
echo "Copy ${lib_name} to ${REAL_DST}"
cp ${lib_name} ${REAL_DST}
done
IFS=: read -a lib_name_array <<<"$CUDA_LIBS"
for lib_name in ${lib_name_array[@]};
do
echo "Copy ${lib_name} to ${REAL_DST}"
cp ${lib_name} ${REAL_DST}
done
cp "${CUSOLVER_LIB}" ${REAL_DST}
cp "${CUBLAS_LIB}" ${REAL_DST}
cp "${CURAND_LIB}" ${REAL_DST}
cp "${CUBLASLT_LIB}" ${REAL_DST}
cp "${CUDART_LIB}" ${REAL_DST}
if [[ ! -z ${NVTRC_LIB} ]]; then
for lib in ${NVTRC_LIB}
do
echo "Copy ${lib} to ${REAL_DST}"
cp "${lib}" ${REAL_DST}
done
fi
if [[ ! -z ${ZLIBWAPI} ]]; then
echo "Copy ${ZLIBWAPI} to ${REAL_DST}"
cp "${ZLIBWAPI}" ${REAL_DST}
fi
fi
}
......@@ -300,12 +382,5 @@ function third_party_prepare() {
######################
export ALREADY_CONFIG_PYTHON_VER="yes"
if [ ${BUILD_WHL_CPU_ONLY} = "OFF" ]; then
if [[ -z $SDK_NAME ]]; then
export SDK_NAME="cu101"
fi
else
export SDK_NAME="cpu"
fi
third_party_prepare
do_build
\ No newline at end of file
do_build