# Maintainer: Orion-zhen <https://github.com/Orion-zhen>
pkgname=ik-llama.cpp
_pkgname=ik_llama.cpp
pkgver=1.0
pkgrel=1
pkgdesc="llama.cpp fork with additional SOTA quants and improved performance (OpenBLAS backend)"
arch=(x86_64 armv7h aarch64)
url="https://github.com/ikawrakow/ik_llama.cpp"
license=("MIT")
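# OpenBLAS supplies the BLAS backend selected in build() below; the
# 64-bit-index variants (openblas64, blas64-openblas) are listed
# alongside the standard library.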
depends=(
  openblas
  openblas64
  blas64-openblas
  curl
  gcc-libs
  glibc
  python
)
makedepends=(
  cmake
  git
)
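# The fork installs the same library and binary names as mainline
# llama.cpp and ggml, so it cannot coexist with those packages.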
conflicts=(
  libggml
  ggml
  llama.cpp
  llama.cpp-vulkan
  llama.cpp-cuda
  llama.cpp-hip
)
provides=(llama.cpp)
options=(lto !debug)
source=("git+${url}")
sha256sums=("SKIP")
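# VCS pkgver: yields e.g. r123.abc1234 (commit count + short hash),
# superseding the placeholder pkgver above on each build.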
pkgver() {
  cd "$_pkgname"
  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
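# GGML_BLAS=ON with GGML_BLAS_VENDOR=OpenBLAS routes supported matrix ops
# through OpenBLAS; GGML_RPC=ON additionally builds the RPC backend for
# offloading work to remote hosts.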
build() {
  local _cmake_options=(
    -B build
    -S "${_pkgname}"
    -DCMAKE_INSTALL_PREFIX='/usr'
    -DBUILD_SHARED_LIBS=ON
    -DLLAMA_CURL=ON
    -DLLAMA_BUILD_TESTS=OFF
    -DLLAMA_USE_SYSTEM_GGML=OFF
    -DGGML_ALL_WARNINGS=OFF
    -DGGML_ALL_WARNINGS_3RD_PARTY=OFF
    -DGGML_BUILD_EXAMPLES=OFF
    -DGGML_BUILD_TESTS=OFF
    -DGGML_BLAS=ON
    -DGGML_BLAS_VENDOR=OpenBLAS
    -DGGML_CCACHE=OFF
    -DGGML_LTO=ON
    -DGGML_RPC=ON
    -Wno-dev
  )
  cmake "${_cmake_options[@]}"
  cmake --build build --config Release
}
package() {
  DESTDIR="${pkgdir}" cmake --install build
}
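
# A minimal usage sketch, assuming this file is saved as PKGBUILD in an
# otherwise empty directory:
#   makepkg -si                          # fetch sources, build, and install
#   makepkg --printsrcinfo > .SRCINFO    # regenerate metadata, e.g. for the AUR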