pkgbase = python-flash-attention
	pkgdesc = Fast and memory-efficient exact attention
	pkgver = 2.7.4
	pkgrel = 1
	url = https://github.com/Dao-AILab/flash-attention
	arch = x86_64
	license = Apache-2.0
	makedepends = ninja
	makedepends = python-build
	makedepends = python-installer
	makedepends = python-packaging
	makedepends = python-psutil
	makedepends = python-setuptools
	makedepends = python-wheel
	depends = python-einops
	depends = python-pytorch-cuda
	source = flash-attention-2.7.4.tar.gz::https://github.com/HazyResearch/flash-attention/archive/refs/tags/v2.7.4.tar.gz
	source = cutlass-4c42f73f.tar.gz::https://github.com/NVIDIA/cutlass/archive/4c42f73fdab5787e3bb57717f35a8cb1b3c0dc6d.tar.gz
	source = flash-attention.diff
	sha256sums = e55f8df2ab4bc57e7e33bc38e76f3b205f27ce6e3f7583009f6b26244b9a08c3
	sha256sums = d9f1831aef8913fc281429c426ee46d992f69e4afea4c78a0d975f6ad649f994
	sha256sums = SKIP

pkgname = python-flash-attention
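
# Note: after installing the package built from this recipe, a minimal smoke
# test of the module is sketched below. It assumes the import name flash_attn,
# a CUDA-capable GPU, and fp16 inputs; it is an illustration, not part of the
# package metadata.
#
#   # Minimal smoke test (assumes python-pytorch-cuda provides CUDA-enabled torch)
#   import torch
#   from flash_attn import flash_attn_func
#
#   # flash-attention expects fp16/bf16 tensors on a CUDA device;
#   # shape convention is (batch, seqlen, nheads, headdim).
#   q = torch.randn(1, 128, 8, 64, device="cuda", dtype=torch.float16)
#   k = torch.randn(1, 128, 8, 64, device="cuda", dtype=torch.float16)
#   v = torch.randn(1, 128, 8, 64, device="cuda", dtype=torch.float16)
#
#   out = flash_attn_func(q, k, v, causal=True)
#   print(out.shape)  # expected: torch.Size([1, 128, 8, 64])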