# Maintainer: txtsd <aur.archlinux@ihavea.quest>
pkgname=llama.cpp
pkgver=b5998
pkgrel=1
pkgdesc="Port of Facebook's LLaMA model in C/C++"
arch=(x86_64 armv7h aarch64)
url='https://github.com/ggerganov/llama.cpp'
license=('MIT')
depends=(
  curl
  gcc-libs
  glibc
  python
  python-numpy
  python-sentencepiece
)
makedepends=(
  cmake
  git
)
optdepends=(python-pytorch)
conflicts=(libggml ggml)
options=(lto !debug)
source=(
  "git+${url}#tag=${pkgver}"
  llama.cpp.conf
  llama.cpp.service
)
sha256sums=('753ee5cd281dd58ae06b198f14fa1625f1779777012b130f3511170d940b7707'
            '53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87'
            '0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d')
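
# Configure and build out-of-tree with CMake: shared libraries against the
# bundled ggml (not the system copy), RPC backend enabled, BLAS disabled.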
build() {
  local _cmake_options=(
    -B build
    -S "${pkgname}"
    -DCMAKE_BUILD_TYPE=Release
    -DCMAKE_INSTALL_PREFIX='/usr'
    -DBUILD_SHARED_LIBS=ON
    -DLLAMA_CURL=ON
    -DLLAMA_BUILD_TESTS=OFF
    -DLLAMA_USE_SYSTEM_GGML=OFF
    -DGGML_ALL_WARNINGS=OFF
    -DGGML_ALL_WARNINGS_3RD_PARTY=OFF
    -DGGML_BUILD_EXAMPLES=OFF
    -DGGML_BUILD_TESTS=OFF
    -DGGML_LTO=ON
    -DGGML_RPC=ON
    -DGGML_BLAS=OFF
    -Wno-dev
  )
  cmake "${_cmake_options[@]}"
  cmake --build build
}
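
# check() is disabled: the test suites are not built
# (LLAMA_BUILD_TESTS=OFF, GGML_BUILD_TESTS=OFF above).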
# check() {
#   ctest --test-dir build --output-on-failure -L 'main|curl' --verbose --timeout 900
# }
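
# Install the CMake targets plus the license, the conf.d configuration
# file, and the systemd service unit.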
package() {
  DESTDIR="${pkgdir}" cmake --install build
  install -Dm644 "${pkgname}/LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
  install -Dm644 "llama.cpp.conf" "${pkgdir}/etc/conf.d/llama.cpp"
  install -Dm644 "llama.cpp.service" "${pkgdir}/usr/lib/systemd/system/llama.cpp.service"
}