pkgbase = ik-llama.cpp-cuda
	pkgdesc = llama.cpp fork with additional SOTA quants and improved performance (CUDA Backend)
	pkgver = r3884.6d2e7ca4
	pkgrel = 1
	url = https://github.com/ikawrakow/ik_llama.cpp
	arch = x86_64
	arch = armv7h
	arch = aarch64
	license = MIT
	makedepends = cmake
	makedepends = git
	depends = cuda
	depends = nvidia-utils
	depends = curl
	depends = gcc-libs
	depends = glibc
	depends = python
	optdepends = python-numpy: needed for convert_hf_to_gguf.py
	optdepends = python-safetensors: needed for convert_hf_to_gguf.py
	optdepends = python-sentencepiece: needed for convert_hf_to_gguf.py
	optdepends = python-pytorch: needed for convert_hf_to_gguf.py
	optdepends = python-transformers: needed for convert_hf_to_gguf.py
	provides = llama.cpp
	conflicts = libggml
	conflicts = ggml
	conflicts = llama.cpp
	conflicts = llama.cpp-vulkan
	conflicts = llama.cpp-cuda
	conflicts = llama.cpp-hip
	conflicts = ik-llama.cpp
	conflicts = ik-llama.cpp-vulkan
	options = lto
	options = !debug

pkgname = ik-llama.cpp-cuda