# publish.yml
  1. # This workflow will:
  2. # - Create a new Github release
  3. # - Build wheels for supported architectures
  4. # - Deploy the wheels to the Github release
  5. # - Release the static code to PyPi
  6. # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
  7. name: Build wheels and deploy
  8. on:
  9. create:
  10. tags:
  11. - v*
  12. jobs:
  13. setup_release:
  14. name: Create Release
  15. runs-on: ubuntu-latest
  16. steps:
  17. - name: Get the tag version
  18. id: extract_branch
  19. run: echo ::set-output name=branch::${GITHUB_REF#refs/tags/}
  20. shell: bash
  21. - name: Create Release
  22. id: create_release
  23. uses: actions/create-release@v1
  24. env:
  25. GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  26. with:
  27. tag_name: ${{ steps.extract_branch.outputs.branch }}
  28. release_name: ${{ steps.extract_branch.outputs.branch }}
  # Builds one wheel per (os, python, torch, cuda, cxx11_abi) matrix entry and
  # uploads it as an asset of the release created by setup_release.
  build_wheels:
    name: Build Wheel
    needs: setup_release
    runs-on: ${{ matrix.os }}

    strategy:
      fail-fast: false
      matrix:
        # Using ubuntu-20.04 instead of 22.04 for more compatibility (glibc). Ideally we'd use the
        # manylinux docker image, but I haven't figured out how to install CUDA on manylinux.
        os: [ubuntu-20.04]
        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
        torch-version: ['2.0.1', '2.1.2', '2.2.2', '2.3.1', '2.4.0.dev20240514']
        cuda-version: ['11.8.0', '12.3.2']
        # We need separate wheels that either uses C++11 ABI (-D_GLIBCXX_USE_CXX11_ABI) or not.
        # Pytorch wheels currently don't use it, but nvcr images have Pytorch compiled with C++11 ABI.
        # Without this we get import error (undefined symbol: _ZN3c105ErrorC2ENS_14SourceLocationESs)
        # when building without C++11 ABI and using it on nvcr images.
        cxx11_abi: ['FALSE', 'TRUE']
        exclude:
          # see https://github.com/pytorch/pytorch/blob/main/RELEASE.md#release-compatibility-matrix
          # Pytorch < 2.2 does not support Python 3.12
          - torch-version: '2.0.1'
            python-version: '3.12'
          - torch-version: '2.1.2'
            python-version: '3.12'
          # Pytorch <= 2.0 only supports CUDA <= 11.8
          - torch-version: '2.0.1'
            cuda-version: '12.3.2'

    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Set CUDA and PyTorch versions
        # Export e.g. MATRIX_CUDA_VERSION=118 and MATRIX_TORCH_VERSION=2.1
        # (major+minor only) for use by the later build steps.
        run: |
          echo "MATRIX_CUDA_VERSION=$(echo ${{ matrix.cuda-version }} | awk -F \. {'print $1 $2'})" >> $GITHUB_ENV
          echo "MATRIX_TORCH_VERSION=$(echo ${{ matrix.torch-version }} | awk -F \. {'print $1 "." $2'})" >> $GITHUB_ENV

      - name: Free up disk space
        if: ${{ runner.os == 'Linux' }}
        # https://github.com/easimon/maximize-build-space/blob/master/action.yml
        # https://github.com/easimon/maximize-build-space/tree/test-report
        run: |
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /opt/ghc
          sudo rm -rf /opt/hostedtoolcache/CodeQL

      - name: Set up swap space
        # Kept in the same ${{ }} expression style as the step above for consistency.
        if: ${{ runner.os == 'Linux' }}
        uses: pierotofy/set-swap-space@v1.0
        with:
          swap-size-gb: 10

      - name: Install CUDA ${{ matrix.cuda-version }}
        if: ${{ matrix.cuda-version != 'cpu' }}
        uses: Jimver/cuda-toolkit@v0.2.14
        id: cuda-toolkit
        with:
          cuda: ${{ matrix.cuda-version }}
          linux-local-args: '["--toolkit"]'
          # default method is "local", and we're hitting some error with caching for CUDA 11.8 and 12.1
          # method: ${{ (matrix.cuda-version == '11.8.0' || matrix.cuda-version == '12.1.0') && 'network' || 'local' }}
          method: 'network'
          # We need the cuda libraries (e.g. cuSparse, cuSolver) for compiling PyTorch extensions,
          # not just nvcc
          # sub-packages: '["nvcc"]'

      - name: Install PyTorch ${{ matrix.torch-version }}+cu${{ matrix.cuda-version }}
        shell: bash
        run: |
          pip install --upgrade pip
          # If we don't install before installing Pytorch, we get error for torch 2.0.1
          # ERROR: Could not find a version that satisfies the requirement setuptools>=40.8.0 (from versions: none)
          pip install lit
          # For some reason torch 2.2.0 on python 3.12 errors saying no setuptools
          pip install setuptools
          # We want to figure out the CUDA version to download pytorch
          # e.g. we can have system CUDA version being 11.7 but if torch==1.12 then we need to download the wheel from cu116
          # see https://github.com/pytorch/pytorch/blob/main/RELEASE.md#release-compatibility-matrix
          # This code is ugly, maybe there's a better way to do this.
          export TORCH_CUDA_VERSION=$(python -c "from os import environ as env; \
            minv = {'2.0': 117, '2.1': 118, '2.2': 118, '2.3': 118, '2.4': 118}[env['MATRIX_TORCH_VERSION']]; \
            maxv = {'2.0': 118, '2.1': 121, '2.2': 121, '2.3': 121, '2.4': 121}[env['MATRIX_TORCH_VERSION']]; \
            print(max(min(int(env['MATRIX_CUDA_VERSION']), maxv), minv))" \
          )
          if [[ ${{ matrix.torch-version }} == *"dev"* ]]; then
            pip install --no-cache-dir --pre torch==${{ matrix.torch-version }} --index-url https://download.pytorch.org/whl/nightly/cu${TORCH_CUDA_VERSION}
          else
            pip install --no-cache-dir torch==${{ matrix.torch-version }} --index-url https://download.pytorch.org/whl/cu${TORCH_CUDA_VERSION}
          fi
          nvcc --version
          python --version
          python -c "import torch; print('PyTorch:', torch.__version__)"
          python -c "import torch; print('CUDA:', torch.version.cuda)"
          python -c "from torch.utils import cpp_extension; print (cpp_extension.CUDA_HOME)"

      - name: Build wheel
        run: |
          # We want setuptools >= 49.6.0 otherwise we can't compile the extension if system CUDA version is 11.7 and pytorch cuda version is 11.6
          # https://github.com/pytorch/pytorch/blob/664058fa83f1d8eede5d66418abff6e20bd76ca8/torch/utils/cpp_extension.py#L810
          # However this still fails so I'm using a newer version of setuptools
          pip install setuptools==68.0.0
          pip install ninja packaging wheel
          export PATH=/usr/local/nvidia/bin:/usr/local/nvidia/lib64:$PATH
          export LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/cuda/lib64:$LD_LIBRARY_PATH
          # Limit MAX_JOBS otherwise the github runner goes OOM
          # CUDA 11.8 can compile with 2 jobs, but CUDA 12.3 goes OOM
          MAX_JOBS=$([ "$MATRIX_CUDA_VERSION" == "123" ] && echo 1 || echo 2) FLASH_ATTENTION_FORCE_BUILD="TRUE" FLASH_ATTENTION_FORCE_CXX11_ABI=${{ matrix.cxx11_abi }} python setup.py bdist_wheel --dist-dir=dist
          # Rename the wheel to embed cuda/torch/abi in the local version tag,
          # e.g. foo-1.0+cu118torch2.1cxx11abiFALSE-...
          tmpname=cu${MATRIX_CUDA_VERSION}torch${MATRIX_TORCH_VERSION}cxx11abi${{ matrix.cxx11_abi }}
          wheel_name=$(ls dist/*whl | xargs -n 1 basename | sed "s/-/+$tmpname-/2")
          ls dist/*whl |xargs -I {} mv {} dist/${wheel_name}
          echo "wheel_name=${wheel_name}" >> $GITHUB_ENV

      - name: Log Built Wheels
        run: |
          ls dist

      - name: Get the tag version
        id: extract_branch
        # Use $GITHUB_OUTPUT instead of the deprecated `::set-output` workflow
        # command; output name ("branch") unchanged for the steps below.
        run: echo "branch=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT

      - name: Get Release with tag
        id: get_current_release
        uses: joutvhu/get-release@v1
        with:
          tag_name: ${{ steps.extract_branch.outputs.branch }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload Release Asset
        id: upload_release_asset
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.get_current_release.outputs.upload_url }}
          asset_path: ./dist/${{ env.wheel_name }}
          asset_name: ${{ env.wheel_name }}
          asset_content_type: application/*
  # Builds the CUDA-free sdist and publishes it to PyPI once all wheels exist.
  publish_package:
    name: Publish package
    needs: [build_wheels]

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install dependencies
        run: |
          pip install ninja packaging setuptools wheel twine
          # We don't want to download anything CUDA-related here
          pip install torch --index-url https://download.pytorch.org/whl/cpu

      - name: Build core package
        env:
          FLASH_ATTENTION_SKIP_CUDA_BUILD: "TRUE"
        run: |
          python setup.py sdist --dist-dir=dist

      - name: Deploy
        env:
          TWINE_USERNAME: "__token__"
          TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          python -m twine upload dist/*