name: nv-inference

on:
  push:
    branches:
      - 'master'
      - 'staging**'
    paths-ignore:
      - 'docs/**'
  pull_request:
    paths-ignore:
      - 'docs/**'

# Cancel any in-progress run for the same workflow and ref when a new one starts.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  unit-tests:
    # Self-hosted runner with an NVIDIA V100 and CUDA 11.3.
    runs-on: [self-hosted, nvidia, cu113, v100]

    steps:
      - uses: actions/checkout@v2

      - name: environment
        run: |
          echo "JobID: $AISC_NODE_INSTANCE_ID"
          nvidia-smi
          which python
          python --version
          which nvcc
          nvcc --version
          pip install --upgrade pip
          # Reinstall torch/torchvision from the CUDA 11.3 wheel index.
          pip uninstall --yes torch torchvision
          pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
          python -c "import torch; print('torch:', torch.__version__, torch)"
          python -c "import torch; print('CUDA available:', torch.cuda.is_available())"

      - name: Install transformers
        run: |
          # Install transformers from source and log the commit being tested.
          git clone https://github.com/huggingface/transformers
          cd transformers
          git rev-parse --short HEAD
          pip uninstall --yes transformers
          pip install .

      - name: Python environment
        run: |
          pip list

      - name: Install deepspeed
        run: |
          pip uninstall --yes deepspeed
          pip install .[dev,1bit,autotuning,sparse_attn,inf]
          ds_report

      - name: Unit tests
        run: |
          unset TORCH_CUDA_ARCH_LIST # only jit compile for current arch
          if [[ -d ./torch-extensions ]]; then rm -rf ./torch-extensions; fi
          cd tests
          # Latest torch major.minor on PyPI, passed to the version-gated tests below.
          EXPECTED_TORCH=$(pip index versions torch | grep -oP -m1 "^\s*LATEST.*\s\K\d+\.\d+")
          TRANSFORMERS_CACHE=/blob/transformers_cache/ TORCH_EXTENSIONS_DIR=./torch-extensions pytest --color=yes --durations=0 --verbose -m 'seq_inference' unit/ --torch_ver=$EXPECTED_TORCH --cuda_ver="11.3"
          TRANSFORMERS_CACHE=/blob/transformers_cache/ TORCH_EXTENSIONS_DIR=./torch-extensions pytest --color=yes --durations=0 -n 4 --verbose -m 'inference' unit/ --torch_ver=$EXPECTED_TORCH --cuda_ver="11.3"
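
# Local reproduction (a sketch, not part of the CI contract): on a machine with
# CUDA 11.3 and torch installed, roughly the same inference suite can be run with:
#
#   pip install .[dev,1bit,autotuning,sparse_attn,inf]
#   cd tests
#   TORCH_EXTENSIONS_DIR=./torch-extensions pytest -m 'inference' unit/ \
#     --torch_ver="$(python -c 'import torch; print(".".join(torch.__version__.split(".")[:2]))')" \
#     --cuda_ver=11.3
#
# TRANSFORMERS_CACHE=/blob/transformers_cache/ is specific to the self-hosted
# runner; locally it can be omitted or pointed at any writable directory.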