author     txtsd  2024-11-09 11:58:34 +0530
committer  txtsd  2024-11-09 11:58:34 +0530
commit     1dbb515663ddfa791ea1fc5e45193f92dd913191 (patch)
tree       f6d3141c8f79e5ce39a501bd433407eb24571e36
parent     35547d6ecacdec7127ce32caa6338950e71b3997 (diff)
upgpkg: llama.cpp-sycl-f32 b4053-1
Upstream Release
Signed-off-by: txtsd <code@ihavea.quest>
-rw-r--r--  .SRCINFO |  6
-rw-r--r--  PKGBUILD | 76
2 files changed, 45 insertions, 37 deletions
diff --git a/.SRCINFO b/.SRCINFO
@@ -1,6 +1,6 @@
 pkgbase = llama.cpp-sycl-f32
 	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with Intel SYCL GPU optimizations and F32)
-	pkgver = b4033
+	pkgver = b4053
 	pkgrel = 1
 	url = https://github.com/ggerganov/llama.cpp
 	arch = x86_64
@@ -21,11 +21,11 @@ pkgbase = llama.cpp-sycl-f32
 	provides = llama.cpp
 	conflicts = llama.cpp
 	options = lto
-	source = git+https://github.com/ggerganov/llama.cpp#tag=b4033
+	source = git+https://github.com/ggerganov/llama.cpp#tag=b4053
 	source = git+https://github.com/nomic-ai/kompute.git
 	source = llama.cpp.conf
 	source = llama.cpp.service
-	sha256sums = 3299f2b01218723720e42f9bd035d14ec06eb1df86d1e77ce3ff1b9f2c96bc40
+	sha256sums = 2e700ceb1142b07c1647d090027a5a5cbd230b316c88a78c7a47afe525ab3033
 	sha256sums = SKIP
 	sha256sums = 53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87
 	sha256sums = 065f69ccd7ac40d189fae723b58d6de2a24966e9b526e0dbfa3035a4c46a7669
diff --git a/PKGBUILD b/PKGBUILD
@@ -2,26 +2,26 @@ pkgname=llama.cpp-sycl-f32
 _pkgname=${pkgname%%-sycl-f32}
-pkgver=b4033
+pkgver=b4053
 pkgrel=1
 pkgdesc="Port of Facebook's LLaMA model in C/C++ (with Intel SYCL GPU optimizations and F32)"
-arch=('x86_64' 'armv7h' 'aarch64')
+arch=(x86_64 armv7h aarch64)
 url='https://github.com/ggerganov/llama.cpp'
 license=('MIT')
 depends=(
-  'curl'
-  'gcc-libs'
-  'glibc'
-  'intel-oneapi-basekit'
-  'python'
-  'python-numpy'
-  'python-sentencepiece'
+  curl
+  gcc-libs
+  glibc
+  intel-oneapi-basekit
+  python
+  python-numpy
+  python-sentencepiece
 )
 makedepends=(
-  'cmake'
-  'git'
-  'openmp'
-  'procps-ng'
+  cmake
+  git
+  openmp
+  procps-ng
 )
 provides=(${_pkgname})
 conflicts=(${_pkgname})
@@ -29,10 +29,10 @@ options=(lto)
 source=(
   "git+${url}#tag=${pkgver}"
   "git+https://github.com/nomic-ai/kompute.git"
-  'llama.cpp.conf'
-  'llama.cpp.service'
+  llama.cpp.conf
+  llama.cpp.service
 )
-sha256sums=('3299f2b01218723720e42f9bd035d14ec06eb1df86d1e77ce3ff1b9f2c96bc40'
+sha256sums=('2e700ceb1142b07c1647d090027a5a5cbd230b316c88a78c7a47afe525ab3033'
             'SKIP'
             '53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87'
             '065f69ccd7ac40d189fae723b58d6de2a24966e9b526e0dbfa3035a4c46a7669')
@@ -46,29 +46,37 @@ prepare() {
 }
 
 build() {
-  cd "${_pkgname}"
   source /opt/intel/oneapi/setvars.sh
-  cmake -S . -B build \
-    -DCMAKE_INSTALL_PREFIX=/usr \
-    -DBUILD_SHARED_LIBS=ON \
-    -DGGML_ALL_WARNINGS_3RD_PARTY=ON \
-    -DGGML_BLAS=ON \
-    -DGGML_LTO=ON \
-    -DGGML_RPC=ON \
-    -DLLAMA_CURL=ON \
-    -DLLAMA_FATAL_WARNINGS=ON \
-    -DCMAKE_C_COMPILER=icx \
-    -DCMAKE_CXX_COMPILER=icpx \
+  local _cmake_options=(
+    -B build
+    -S "${_pkgname}"
+    -DCMAKE_BUILD_TYPE=None
+    -DCMAKE_INSTALL_PREFIX='/usr'
+    -DGGML_NATIVE=OFF
+    -DGGML_AVX2=OFF
+    -DGGML_AVX=OFF
+    -DGGML_F16C=OFF
+    -DGGML_FMA=OFF
+    -DGGML_ALL_WARNINGS=OFF
+    -DGGML_ALL_WARNINGS_3RD_PARTY=OFF
+    -DGGML_LTO=ON
+    -DGGML_RPC=ON
+    -DLLAMA_CURL=ON
+    -DGGML_BLAS=ON
+    -DCMAKE_C_COMPILER=icx
+    -DCMAKE_CXX_COMPILER=icpx
     -DGGML_SYCL=ON
-  cmake --build build --config Release
+    -Wno-dev
+  )
+  cmake "${_cmake_options[@]}"
+  cmake --build build
 }
 
 package() {
-  cd "${_pkgname}"
-
   DESTDIR="${pkgdir}" cmake --install build
-  install -Dm644 'LICENSE' "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
-  install -Dm644 "${srcdir}/llama.cpp.conf" "${pkgdir}/etc/conf.d/llama.cpp"
-  install -Dm644 "${srcdir}/llama.cpp.service" "${pkgdir}/usr/lib/systemd/system/llama.cpp.service"
+  install -Dm644 "${_pkgname}/LICENSE" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+
+  install -Dm644 "llama.cpp.conf" "${pkgdir}/etc/conf.d/llama.cpp"
+  install -Dm644 "llama.cpp.service" "${pkgdir}/usr/lib/systemd/system/llama.cpp.service"
 }