author     txtsd   2024-12-03 20:47:31 +0000
committer  txtsd   2024-12-03 20:47:31 +0000
commit     c7e2b36d440e1761c296e103030eaa9d18e9b4ec (patch)
tree       7e21229142ec91d563173c547aa203f060558761 /.SRCINFO
parent     4b61b6a0dd916428e2c4b48bb76cfea619d2254f (diff)
upgpkg: llama.cpp-cuda b4255-1
Diffstat (limited to '.SRCINFO')
 -rw-r--r--  .SRCINFO | 6
 1 file changed, 3 insertions(+), 3 deletions(-)
@@ -1,6 +1,6 @@
 pkgbase = llama.cpp-cuda
 	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with NVIDIA CUDA optimizations)
-	pkgver = b4254
+	pkgver = b4255
 	pkgrel = 1
 	url = https://github.com/ggerganov/llama.cpp
 	arch = x86_64
@@ -23,11 +23,11 @@ pkgbase = llama.cpp-cuda
 	conflicts = llama.cpp
 	conflicts = libggml
 	options = lto
-	source = git+https://github.com/ggerganov/llama.cpp#tag=b4254
+	source = git+https://github.com/ggerganov/llama.cpp#tag=b4255
 	source = git+https://github.com/nomic-ai/kompute.git
 	source = llama.cpp.conf
 	source = llama.cpp.service
-	sha256sums = aa3789c5511c0fa07ca672be01b17910d335273bfeb1ebd28ac3719c725129b8
+	sha256sums = 877ff85ad8b705163b920a765eda40a7d6f7a9e833b401a2ed4e486ce2fa738e
 	sha256sums = SKIP
 	sha256sums = 53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87
 	sha256sums = 0377d08a07bda056785981d3352ccd2dbc0387c4836f91fb73e6b790d836620d
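The .SRCINFO changed above is generated output: it is regenerated from the PKGBUILD whenever the package is bumped, not edited by hand. The following is a minimal sketch of the standard AUR upgpkg workflow that would produce a change like this one; it assumes the PKGBUILD derives its source tag from ${pkgver}, and it is not taken from this repository's actual tooling.

```sh
# Sketch only: assumes the PKGBUILD's source array uses "#tag=${pkgver}",
# so bumping pkgver also moves the pinned git tag.
pkgver=b4255

# Bump pkgver and reset pkgrel in the PKGBUILD.
sed -i -e "s/^pkgver=.*/pkgver=${pkgver}/" -e "s/^pkgrel=.*/pkgrel=1/" PKGBUILD

# Refresh the sha256sums array (updpkgsums ships with pacman-contrib);
# with a pinned git tag this rewrites the first checksum seen in the diff.
updpkgsums

# Regenerate .SRCINFO from the updated PKGBUILD.
makepkg --printsrcinfo > .SRCINFO

# Commit with the same message style as this commit.
git add PKGBUILD .SRCINFO
git commit -m "upgpkg: llama.cpp-cuda ${pkgver}-1"
```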