Skip to content

Commit 241c682

Browse files
committed
[CI] Switch back to CUDA 12.4
1 parent c555642 commit 241c682

File tree

3 files changed: +4 −4 lines changed

.github/workflows/publish.yml

Lines changed: 1 addition & 1 deletion

@@ -45,7 +45,7 @@ jobs:
         os: [ubuntu-20.04]
         python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
         torch-version: ['2.1.2', '2.2.2', '2.3.1', '2.4.0', '2.5.1']
-        cuda-version: ['11.8.0', '12.3.2']
+        cuda-version: ['11.8.0', '12.4.1']
         # We need separate wheels that either uses C++11 ABI (-D_GLIBCXX_USE_CXX11_ABI) or not.
         # Pytorch wheels currently don't use it, but nvcr images have Pytorch compiled with C++11 ABI.
         # Without this we get import error (undefined symbol: _ZN3c105ErrorC2ENS_14SourceLocationESs)

flash_attn/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-__version__ = "2.7.0"
+__version__ = "2.7.0.post1"

 from flash_attn.flash_attn_interface import (
     flash_attn_func,

setup.py

Lines changed: 2 additions & 2 deletions

@@ -436,9 +436,9 @@ def get_wheel_url():
     # We're using the CUDA version used to build torch, not the one currently installed
     # _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME)
     torch_cuda_version = parse(torch.version.cuda)
-    # For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.3
+    # For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.4
     # to save CI time. Minor versions should be compatible.
-    torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.3")
+    torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.4")
     # cuda_version = f"{cuda_version_raw.major}{cuda_version_raw.minor}"
     cuda_version = f"{torch_cuda_version.major}"

0 commit comments

Comments (0)