Commit 125406e3 authored by Megvii Engine Team

feat(cmake/windows/cuda): upgrade windows llvm to latest 12.0.1, to fit the cutlass upgrade

GitOrigin-RevId: 23b9d779dfbfc6dbbf31b63abdcc7a4aa6c654cd
Parent b8ea6392
......@@ -24,17 +24,16 @@
* install to default dir: /c/Program\ Files/Git
2: install visual studio 2019 Enterprise (Windows GUI)
* download install exe from https://visualstudio.microsoft.com
* choose "c++ develop" -> choose cmake/MSVC/clang/cmake/windows-sdk when install
* choose "c++ develop" -> choose cmake/MSVC/cmake/windows-sdk when install
* NOTICE: windows sdk versions >= 14.28.29910 are not compatible with CUDA 10.1, please
choose a version < 14.28.29910
* then install the chosen components
* after installing visual studio 2019 Enterprise, replace lld-link.exe,
because the visual studio 2019 lld-link.exe has a crash issue
download the official exe from https://releases.llvm.org/download.html
install to the default: C:\Program Files\LLVM
cd "/c/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/VC/Tools/Llvm/bin"
cp /c/Program\ Files/LLVM/bin/lld-link.exe lld-link.exe
3: install python3 (Windows GUI)
3: install LLVM from https://releases.llvm.org/download.html (Windows GUI)
* the llvm installed by Visual Studio has some issues, e.g., link crashes on large projects, so please use the official version
* download the installer exe from https://releases.llvm.org/download.html
* our CI uses LLVM 12.0.1; if you install another version, please modify LLVM_PATH accordingly
* install 12.0.1 to /c/Program\ Files/LLVM_12_0_1
4: install python3 (Windows GUI)
* download the python 64-bit installer exe (we support python3.5-python3.8 now)
https://www.python.org/ftp/python/3.5.4/python-3.5.4-amd64.exe
https://www.python.org/ftp/python/3.6.8/python-3.6.8-amd64.exe
......@@ -52,21 +51,21 @@
python3.exe -m pip install --upgrade pip
python3.exe -m pip install -r imperative/python/requires.txt
python3.exe -m pip install -r imperative/python/requires-test.txt
4: install cuda components (Windows GUI)
5: install cuda components (Windows GUI)
* now we support cuda10.1+cudnn7.6+TensorRT6.0 on Windows
* install cuda10.1 to C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1
* install cudnn7.6 to C:\Program Files\NVIDIA GPU Computing Toolkit\cudnn-10.1-windows10-x64-v7.6.5.32
* install TensorRT6.0 to C:\Program Files\NVIDIA GPU Computing Toolkit\TensorRT-6.0.1.5
5: edit system env variables (Windows GUI)
6: edit system env variables (Windows GUI)
* create new key: "VS_PATH", value: "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise"
* create new key: "LLVM_PATH", value: "C:\Program Files\LLVM_12_0_1"
* append "Path" env value
C:\Program Files\Git\cmd
C:\Users\build\mge_whl_python_env\3.8.3
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\bin
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\libnvvp
C:\Program Files\NVIDIA GPU Computing Toolkit\cudnn-10.1-windows10-x64-v7.6.5.32\cuda\bin
C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\Llvm\lib\clang\11.0.0\lib\windows
C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Tools\Llvm\x64\lib\clang\11.0.0\lib\windows
C:\Program Files\LLVM_12_0_1\lib\clang\12.0.1\lib\windows
```
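For reference, here is a minimal git-bash sketch of the environment described in steps 3 and 6, assuming the default install locations used above; the system-wide values are normally set through the Windows GUI as described.

```
# minimal git-bash sketch of step 6 (normally configured via the Windows GUI);
# all paths assume the default install locations from the steps above
export VS_PATH="/c/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise"
export LLVM_PATH="/c/Program Files/LLVM_12_0_1"
export PATH="/c/Program Files/Git/cmd:$PATH"
export PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1/bin:$PATH"
export PATH="/c/Program Files/NVIDIA GPU Computing Toolkit/cudnn-10.1-windows10-x64-v7.6.5.32/cuda/bin:$PATH"
export PATH="${LLVM_PATH}/bin:${LLVM_PATH}/lib/clang/12.0.1/lib/windows:$PATH"
```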
### Linux host build
......
......@@ -161,7 +161,7 @@ function prepare_env_for_windows_build() {
fi
echo $VS_PATH
# only use the cmake/clang-cl/Ninja installed from Visual Studio; otherwise the build may fail
# only use the cmake/Ninja installed with Visual Studio; otherwise the build may fail
# some user envs have cmake/clang-cl/Ninja installed in the windows-git-bash env, so we put the Visual Studio
# path at the head of PATH, and check that the right ones are picked up
echo "check cmake install..."
......@@ -176,14 +176,22 @@ function prepare_env_for_windows_build() {
fi
echo "check clang-cl install..."
export PATH=$VS_PATH/VC/Tools/Llvm/bin/:$PATH
which clang-cl
# the LLVM installed by Visual Studio has some issues, e.g., link crashes on large projects, so we
# use the official LLVM downloaded from https://releases.llvm.org/download.html
if [[ -z ${LLVM_PATH} ]];then
echo "can not find LLVM_PATH env, pls export you LLVM install dir to LLVM_PATH"
echo "examle for export LLVM_12_0_1"
echo "export LLVM_PATH=/c/Program\ Files/LLVM_12_0_1"
exit -1
fi
echo ${LLVM_PATH}
export PATH=${LLVM_PATH}/bin/:$PATH
clang_loc=`which clang-cl`
if [[ $clang_loc =~ "Visual" ]]; then
echo "clang-cl valid ..."
else
echo "clang-cl Invalid: ..."
echo "clang-cl Invalid: we do not support use LLVM installed by Visual Studio"
windows_env_err
else
echo "clang-cl valid ..."
fi
echo "check Ninja install..."
......
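The new check above deliberately fails when the clang-cl found on PATH resolves to the copy bundled with Visual Studio. A quick manual way to verify which clang-cl git-bash will pick up (a sketch, assuming the default install dir from the README; adjust LLVM_PATH if you installed elsewhere):

```
# verify the official LLVM clang-cl is found ahead of the Visual Studio copy
export LLVM_PATH="/c/Program Files/LLVM_12_0_1"   # assumption: default install dir
export PATH="${LLVM_PATH}/bin:$PATH"
which clang-cl        # should resolve under LLVM_12_0_1, not under .../Visual Studio/...
clang-cl --version    # CI expects clang version 12.0.1
```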
......@@ -16,8 +16,10 @@ function err_env() {
function append_path_env_and_check() {
echo "export vs2019 install path"
export VS_PATH=/c/Program\ Files\ \(x86\)/Microsoft\ Visual\ Studio/2019/Enterprise
echo "export LLVM install path"
export LLVM_PATH=/c/Program\ Files/LLVM_12_0_1
# for llvm-strip
export PATH=$VS_PATH/VC/Tools/Llvm/bin/:$PATH
export PATH=${LLVM_PATH}/bin/:$PATH
}
append_path_env_and_check
......
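The LLVM bin directory is appended to PATH above mainly so that llvm-strip is available to the steps that follow (the comment notes "for llvm-strip"). A usage sketch, with a purely hypothetical DLL path:

```
# hypothetical example: strip symbols from a built DLL before it is packaged
llvm-strip --strip-all /c/path/to/build_dir/some_megengine_output.dll
```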
......@@ -12,5 +12,10 @@ install (TARGETS load_and_run EXPORT ${MGE_EXPORT_TARGETS} RUNTIME DESTINATION $
if(MGE_WITH_TEST)
add_executable(json_loader_test test/json_loader_test.cpp src/json_loader.h src/json_loader.cpp)
target_link_libraries (json_loader_test megengine)
# Windows does not support implicitly importing data members from a DLL.
if (WIN32)
target_link_libraries (json_loader_test megbrain megdnn ${MGE_CUDA_LIBS})
else()
target_link_libraries (json_loader_test megengine)
endif()
endif()
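The WIN32 branch above works around the fact that global data exported from a DLL cannot be linked implicitly on Windows (the consumer needs an explicit __declspec(dllimport)), so the test links the megbrain/megdnn targets directly instead of the megengine DLL. To see what a built DLL actually exports, one can dump its export table; a sketch, assuming an MSVC developer environment where dumpbin is on PATH, with a hypothetical DLL path:

```
# list the export table of the built DLL; the leading double slash stops
# git-bash from rewriting /EXPORTS as a filesystem path
dumpbin //EXPORTS /c/path/to/build_dir/megengine.dll | head -n 40
```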
......@@ -197,33 +197,37 @@ endif()
set (_VER_FILE ${PROJECT_SOURCE_DIR}/src/version.ld)
if(MGE_BUILD_IMPERATIVE_RT
)
message(VERBOSE "create a export SHARED lib for python use")
add_library(megengine_export SHARED)
target_link_libraries(megengine_export PUBLIC megbrain megdnn)
target_link_libraries(megengine_export PRIVATE ${MGE_CUDA_LIBS})
if (MGE_WITH_DISTRIBUTED)
message(VERBOSE "megengine_export configured to link megray")
target_link_libraries(megengine_export PUBLIC megray)
# Windows does not support implicitly importing data members from a DLL,
# so this export lib is skipped on Windows; consumers instead
#   depend on the megdnn/megbrain targets directly (refs to sdk/load-and-run/CMakeLists.txt)
#   or depend on megengine lite_share or lite_static
if(NOT WIN32)
if(MGE_BUILD_IMPERATIVE_RT
)
message(VERBOSE "create a export SHARED lib for python use")
add_library(megengine_export SHARED)
target_link_libraries(megengine_export PUBLIC megbrain megdnn)
target_link_libraries(megengine_export PRIVATE ${MGE_CUDA_LIBS})
if (MGE_WITH_DISTRIBUTED)
message(VERBOSE "megengine_export configured to link megray")
target_link_libraries(megengine_export PUBLIC megray)
endif()
endif()
endif()
# Build as SHARED or STATIC depending on BUILD_SHARED_LIBS=ON/OFF
add_library(megengine)
target_link_libraries(megengine PRIVATE ${MGE_CUDA_LIBS})
target_link_libraries(megengine PUBLIC megbrain megdnn)
if (UNIX AND NOT APPLE)
target_link_options(megengine PRIVATE -Wl,--no-undefined -Wl,--version-script=${_VER_FILE})
set_target_properties(megengine PROPERTIES LINK_DEPENDS ${_VER_FILE})
endif()
set_target_properties(megengine PROPERTIES CXX_VISIBILITY_PRESET default)
set_target_properties(megengine PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
# Do not export targets if MGE_WITH_DISTRIBUTED is on. MegRay is not ready
# for this.
install(TARGETS megengine
# Build as SHARED or STATIC depending on BUILD_SHARED_LIBS=ON/OFF
add_library(megengine)
target_link_libraries(megengine PRIVATE ${MGE_CUDA_LIBS})
target_link_libraries(megengine PUBLIC megbrain megdnn)
if (UNIX AND NOT APPLE)
target_link_options(megengine PRIVATE -Wl,--no-undefined -Wl,--version-script=${_VER_FILE})
set_target_properties(megengine PROPERTIES LINK_DEPENDS ${_VER_FILE})
endif()
# Do not export targets if MGE_WITH_DISTRIBUTED is on. MegRay is not ready
# for this.
install(TARGETS megengine
EXPORT ${MGE_EXPORT_TARGETS}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
if (NOT MGE_WITH_DISTRIBUTED)
install(TARGETS megbrain
......
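As the comment in the hunk above notes, add_library(megengine) follows the standard CMake BUILD_SHARED_LIBS convention, so the same target can be produced either as a shared library or as a static archive. A hedged configure sketch; the generator and build directory are illustrative, and only the option names that appear in the diff are taken from the project:

```
# illustrative: build megengine as a static library instead of a shared one;
# BUILD_SHARED_LIBS is the standard CMake switch mentioned in the comment above
cmake -S . -B build -G Ninja -DBUILD_SHARED_LIBS=OFF -DMGE_BUILD_IMPERATIVE_RT=ON
cmake --build build --target megengine
```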