-rw-r--r--  .SRCINFO | 6 +++---
-rw-r--r--  PKGBUILD | 4 ++--
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/.SRCINFO b/.SRCINFO
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
 pkgbase = llama.cpp-hip
 	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with AMD ROCm optimizations)
-	pkgver = b5142
+	pkgver = b5143
 	pkgrel = 1
 	url = https://github.com/ggerganov/llama.cpp
 	arch = x86_64
@@ -28,11 +28,11 @@ pkgbase = llama.cpp-hip
 	provides = llama.cpp
 	conflicts = llama.cpp
 	options = lto
-	source = git+https://github.com/ggerganov/llama.cpp#tag=b5142
+	source = git+https://github.com/ggerganov/llama.cpp#tag=b5143
 	source = git+https://github.com/nomic-ai/kompute.git
 	source = llama.cpp.conf
 	source = llama.cpp.service
-	sha256sums = a7ba0b8f4d9403379a6664a84e827a8c9b2ce9ef09d271e1a0f78c2cf8c51435
+	sha256sums = a22ba58854e2612c632576cb1ece8ed541f6a178050f9e43ace42b280b3d299d
 	sha256sums = SKIP
 	sha256sums = 53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87
 	sha256sums = 0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d
diff --git a/PKGBUILD b/PKGBUILD
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -2,7 +2,7 @@
 pkgname=llama.cpp-hip
 _pkgname="${pkgname%-hip}"
-pkgver=b5142
+pkgver=b5143
 pkgrel=1
 pkgdesc="Port of Facebook's LLaMA model in C/C++ (with AMD ROCm optimizations)"
 arch=(x86_64 armv7h aarch64)
@@ -39,7 +39,7 @@ source=(
 	llama.cpp.conf
 	llama.cpp.service
 )
-sha256sums=('a7ba0b8f4d9403379a6664a84e827a8c9b2ce9ef09d271e1a0f78c2cf8c51435'
+sha256sums=('a22ba58854e2612c632576cb1ece8ed541f6a178050f9e43ace42b280b3d299d'
            'SKIP'
            '53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87'
            '0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d')
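
For context, a release bump like this one (pkgver b5142 -> b5143 plus a new git-source checksum and regenerated .SRCINFO) is normally produced with the standard makepkg tooling rather than by hand-editing hashes. The following is a minimal sketch of that workflow, not taken from this repository; it assumes a local checkout of the AUR package and that updpkgsums (from pacman-contrib) is installed:

    # Point the PKGBUILD at the new upstream release tag
    sed -i 's/^pkgver=b5142$/pkgver=b5143/' PKGBUILD

    # Re-download the sources and rewrite the sha256sums=() array in place
    # (alternatively, makepkg -g prints the new sums for manual pasting)
    updpkgsums

    # Regenerate the AUR metadata so .SRCINFO matches the updated PKGBUILD
    makepkg --printsrcinfo > .SRCINFO

    # Sanity-check that the package still builds before pushing
    makepkg --cleanbuild --syncdeps

Entries pinned to a fixed checksum (here the llama.cpp git source) change on every bump, while entries listed as SKIP and the unchanged llama.cpp.conf / llama.cpp.service files keep their values.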