name: nv-transformers-v100

on:
  push:
    branches:
      - 'master'
      - 'staging**'
    paths-ignore:
      - 'docs/**'
  pull_request:
    paths-ignore:
      - 'docs/**'

# Cancel any in-progress run for the same ref when a new commit arrives
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  unit-tests:
    runs-on: [self-hosted, nvidia, torch18, v100]

    steps:
      - uses: actions/checkout@v2

      - name: environment
        run: |
          nvidia-smi
          which python
          python --version
          which nvcc
          nvcc --version
          pip install torch==1.8.2+cu111 torchvision==0.9.2+cu111 -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
          python -c "import torch; print('torch:', torch.__version__, torch)"
          python -c "import torch; print('CUDA available:', torch.cuda.is_available())"

      - name: Install deepspeed
        run: |
          pip install .[dev,autotuning]
          ds_report

      - name: HF transformers tests
        run: |
          if [[ -d ./torch-extensions ]]; then rm -rf ./torch-extensions; fi
          git clone https://github.com/huggingface/transformers
          cd transformers
          # if needed, switch to the last known good SHA until transformers@master is fixed
          # git checkout 1cc453d33
          git rev-parse --short HEAD
          # scipy/sklearn are required for the tests; the 'dev' extra would force a torch re-install, so use 'testing' instead
          pip install .[testing]
          # install the requirements of the example scripts used in the DeepSpeed integration tests
          find examples/pytorch -regextype posix-egrep -regex '.*(language-modeling|question-answering|summarization|image-classification|text-classification|translation).*/requirements.txt' -exec pip install -r {} \;
          TORCH_EXTENSIONS_DIR=./torch-extensions RUN_SLOW=1 pytest --color=yes --durations=0 --verbose tests/deepspeed