From ce9ad07a27ec909fb8db4dd67943d24ba98fb93a Mon Sep 17 00:00:00 2001
From: huangxinda
Date: Mon, 7 Feb 2022 14:24:11 +0800
Subject: [PATCH] feat(ci): update ci and readme

---
 .github/workflows/ci.yml | 12 ++++++++----
 README.md                |  2 +-
 README_CN.md             |  6 +++---
 ci/cmake.sh              |  3 ++-
 ci/docker_env/Dockerfile | 22 ++++++++++++++++------
 5 files changed, 30 insertions(+), 15 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a9450588f..0d2c9079b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -23,12 +23,13 @@ jobs:
     runs-on: self-hosted
     needs: [check-commit]
     container:
-      image: localhost:5000/megengine-ci:latest
+      image: localhost:5000/megengine-ci:v1
     steps:
       - name: Checkout MegEngine
         uses: actions/checkout@v2
       - name: Checkout submodules
         run: |
+          apt update&&apt install ninja-build
           ./third_party/prepare.sh
           ./third_party/install-mkl.sh
       - name: Build MegEngine
@@ -46,9 +47,9 @@
     runs-on: self-hosted
     needs: [check-commit]
     container:
-      image: localhost:5000/megengine-ci:latest
+      image: localhost:5000/megengine-ci:v1
       volumes:
-        - /usr/local/cuda-10.1-libs:/usr/local/cuda-10.1-libs
+        - /usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs
       options: --gpus all --shm-size 1g
     env:
       NCCL_LAUNCH_MODE: PARALLEL
@@ -57,6 +58,7 @@
         uses: actions/checkout@v2
       - name: Checkout submodules
         run: |
+          apt update&&apt install ninja-build
           ./third_party/prepare.sh
           ./third_party/install-mkl.sh
       - name: Build MegEngine
@@ -72,8 +74,10 @@
         run: ./ci/run_cpp_test.sh cuda
   auto-merge:
     if: ${{ github.ref == 'refs/heads/try-import' }}
-    runs-on: ubuntu-latest
+    runs-on: self-hosted
     needs: [cpu-test, gpu-test]
+    container:
+      image: localhost:5000/megengine-ci:v1
     steps:
       - name: Checkout MegEngine
         uses: actions/checkout@v2
diff --git a/README.md b/README.md
index ac7db1a9d..859d4de79 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ MegEngine is a fast, scalable and easy-to-use deep learning framework, with auto
 
 ## Installation
 
-**NOTE:** MegEngine now supports Python installation on Linux-64bit/Windows-64bit/MacOS(CPU-Only)-10.14+ platforms with Python from 3.5 to 3.8. On Windows 10 you can either install the Linux distribution through [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl) or install the Windows distribution directly. Many other platforms are supported for inference.
+**NOTE:** MegEngine now supports Python installation on Linux-64bit/Windows-64bit/MacOS(CPU-Only)-10.14+/Android 7+(CPU-Only) platforms with Python from 3.5 to 3.8. On Windows 10 you can either install the Linux distribution through [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl) or install the Windows distribution directly. Many other platforms are supported for inference.
 ### Binaries
diff --git a/README_CN.md b/README_CN.md
index 93fbf9657..92b6191be 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -13,7 +13,7 @@ MegEngine 是一个快速、可拓展、易于使用且支持自动求导的深
 
 ## 安装说明
 
-**注意:** MegEngine 现在支持在 Linux-64bit/Windows-64bit/macos-10.14及其以上 (MacOS只支持cpu) 等平台上安装 Python 包,支持Python3.5 到 Python3.8。对于 Windows 10 用户,可以通过安装 [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl) 进行体验,同时我们也原生支持Windows。MegEngine 也支持在很多其它平台上进行推理运算。
+**注意:** MegEngine 现在支持在 Linux-64bit/Windows-64bit/macos-10.14/Android 7+ 及其以上 (MacOS/Android只支持cpu) 等平台上安装 Python 包,支持Python3.5 到 Python3.8。对于 Windows 10 用户,可以通过安装 [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl) 进行体验,同时我们也原生支持Windows。MegEngine 也支持在很多其它平台上进行推理运算。
 
 ### 通过包管理器安装
 
@@ -26,8 +26,8 @@ python3 -m pip install megengine -f https://megengine.org.cn/whl/mge.html
 
 ## 通过源码编译安装
 
-* CMake编译细节请参考 [BUILD_README.md](scripts/cmake-build/BUILD_README.md)
-* Python绑定编译细节请参考 [BUILD_PYTHON_WHL_README.md](scripts/whl/BUILD_PYTHON_WHL_README.md)
+* CMake 编译细节请参考 [BUILD_README.md](scripts/cmake-build/BUILD_README.md)
+* Python 绑定编译细节请参考 [BUILD_PYTHON_WHL_README.md](scripts/whl/BUILD_PYTHON_WHL_README.md)
 
 ## 如何参与贡献
diff --git a/ci/cmake.sh b/ci/cmake.sh
index 8d8c55bf2..f9cc91d23 100755
--- a/ci/cmake.sh
+++ b/ci/cmake.sh
@@ -27,7 +27,8 @@ function build() {
         -DMGE_WITH_DISTRIBUTED=${DMGE_WITH_DISTRIBUTED} \
         -DMGE_WITH_CUDA=${DMGE_WITH_CUDA} \
         -DMGE_WITH_TEST=ON \
-        -DCMAKE_BUILD_TYPE=RelWithDebInfo
+        -DCMAKE_BUILD_TYPE=RelWithDebInfo \
+        -DMGE_WITH_CUSTOM_OP=ON
     make -j$(($(nproc) * 2)) -I ${build_dir}
     make develop
     popd >/dev/null
diff --git a/ci/docker_env/Dockerfile b/ci/docker_env/Dockerfile
index 4496fc99d..f021b6c39 100644
--- a/ci/docker_env/Dockerfile
+++ b/ci/docker_env/Dockerfile
@@ -24,17 +24,27 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     swig \
     vim \
     wget \
+    libgl1-mesa-glx \
+    libsm6 \
+    libxext6 \
     zlib1g-dev \
     # GitLab Runner need Git 2.18 or higher to create a local Git repository
     && add-apt-repository ppa:git-core/ppa -y && apt-get install --no-install-recommends -y git \
     && rm -rf /var/lib/apt/lists/*
 
-RUN cd /tmp ; wget https://cmake.org/files/v3.14/cmake-3.14.4.tar.gz;tar -xzvf cmake-3.14.4.tar.gz;cd cmake-3.14.4;./configure; make -j32; make install
+RUN cd /tmp ; wget https://cmake.org/files/v3.15/cmake-3.15.2.tar.gz;tar -xzvf cmake-3.15.2.tar.gz;cd cmake-3.15.2;./configure; make -j32; make install
 
 RUN git lfs install
 
-ENV PATH=${PATH}:/usr/local/cuda/bin \
-    LIBRARY_PATH=${LIBRARY_PATH}:/usr/local/cuda/lib:/usr/local/cuda/lib64:/usr/local/cuda/lib/stubs:/usr/local/cuda/lib64/stubs:/usr/local/cuda-10.1-libs/cudnn-v7.6.0/lib:/usr/local/cuda-10.1-libs/cudnn-v7.6.0/lib64:/usr/local/cuda-10.1-libs/TensorRT-5.1.5.0/lib:/usr/local/cuda-10.1-libs/TensorRT-5.1.5.0/lib64 \
-    LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda-10.1-libs/cudnn-v7.6.0/lib:/usr/local/cuda-10.1-libs/cudnn-v7.6.0/lib64:/usr/local/cuda-10.1-libs/TensorRT-5.1.5.0/lib:/usr/local/cuda-10.1-libs/TensorRT-5.1.5.0/lib64:/tmp/build/cuda/dnn/cuda-stub/libcuda.so \
-    CPATH=${CPATH}:/usr/local/cuda/include:/usr/local/cuda-10.1-libs/cudnn-v7.6.0/include:/usr/local/cuda-10.1-libs/TensorRT-5.1.5.0/include \
-    CUDA_BIN_PATH=/usr/local/cuda
+RUN pip3 install --upgrade pip
+
+# TODO: set following envs in github environment.
+ENV CUDA_ROOT_DIR=/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cuda-10.1 \
+    TRT_ROOT_DIR=/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/TensorRT-6.0.1.5 \
+    TENSORRT_ROOT_DIR=${TRT_ROOT_DIR} \
+    CUDNN_ROOT_DIR=/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cudnn-v7.6.3 \
+    PATH=/usr/bin:${CUDA_ROOT_DIR}/bin:${CUDA_ROOT_DIR}/nsight-compute-2019.4.0:$PATH \
+    LIBRARY_PATH=${LIBRARY_PATH}:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cuda-10.1/lib:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cuda-10.1/lib64:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cuda-10.1/lib/stubs:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cuda-10.1/lib64/stubs:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cudnn-v7.6.3/lib:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cudnn-v7.6.3/lib64:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/TensorRT-6.0.1.5/lib:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/TensorRT-6.0.1.5/lib64 \
+    LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cudnn-v7.6.3/lib:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cudnn-v7.6.3/lib64:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/TensorRT-6.0.1.5/lib:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/TensorRT-6.0.1.5/lib64 \
+    CPATH=${CPATH}:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cuda-10.1/include:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cudnn-v7.6.3/include:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/TensorRT-6.0.1.5/include \
+    CUDA_BIN_PATH=/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs/cuda-10.1

--
GitLab
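The "apt update&&apt install ninja-build" step added to both test jobs in .github/workflows/ci.yml runs inside a non-interactive container. A minimal sketch of a more defensive form follows; the -y and --no-install-recommends flags and the DEBIAN_FRONTEND setting are assumptions, only the ninja-build package itself comes from the patch.

    # Sketch only: non-interactive variant of the dependency step (flags are assumed).
    export DEBIAN_FRONTEND=noninteractive
    apt-get update && apt-get install -y --no-install-recommends ninja-build
    ninja --version   # confirm the generator that CMake will use is present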
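The README hunks extend the list of platforms with prebuilt Python packages (adding Android 7+, CPU only). A quick check of an install that follows the documented command is sketched below; the pip command appears in the README_CN.md context above, while the version-printing one-liner is an assumption.

    # Sketch: install from the MegEngine wheel index and confirm the package imports.
    python3 -m pip install megengine -f https://megengine.org.cn/whl/mge.html
    python3 -c "import megengine; print(megengine.__version__)"   # assumed attribute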
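The new ENV block in ci/docker_env/Dockerfile expects the CUDA 10.1 / cuDNN 7.6.3 / TensorRT 6.0.1.5 toolchain to be mounted at /usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs, matching the volume added to the GPU test job. A rough sanity check is sketched below; the image tag, mount path, and environment variable names come from the patch, while the nvcc/cudnn.h/NvInfer.h file locations are the conventional layouts and are assumptions here.

    # Sketch: verify the mounted toolchain lines up with the ENV block (file names assumed).
    docker run --rm \
        -v /usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs:/usr/local/cuda-10.1-cudnn-7.6.3-trt-6.0.1.5-libs \
        localhost:5000/megengine-ci:v1 \
        bash -c 'ls "$CUDA_ROOT_DIR/bin/nvcc" "$CUDNN_ROOT_DIR/include/cudnn.h" "$TRT_ROOT_DIR/include/NvInfer.h"'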