Package Details: llama-cpp-opencl c3e53b4-1

Git Clone URL: https://aur.archlinux.org/llama-cpp.git (read-only, click to copy)
Package Base: llama-cpp
Description: Port of Facebook's LLaMA model in C/C++ (with OpenCL)
Upstream URL: https://github.com/ggerganov/llama.cpp
Licenses: GPL3
Conflicts: llama-cpp
Provides: llama-cpp
Submitter: Freed
Maintainer: Freed
Last Packager: Freed
Votes: 1
Popularity: 0.20
First Submitted: 2023-07-18 07:59 (UTC)
Last Updated: 2023-08-24 11:40 (UTC)

Latest Comments

lmat commented on 2024-04-03 13:02 (UTC)

I just tried to build this and got:

curl: (56) The requested URL returned error: 404
ERROR: Failure while downloading https://github.com/ggerganov/llama.cpp/archive/master-c3e53b4.tar.gz

I changed the source to https://github.com/ggerganov/llama.cpp/archive/refs/tags/b2586.tar.gz, and hoping for the best.

dront78 commented on 2023-09-08 07:51 (UTC) (edited on 2023-09-08 07:52 (UTC) by dront78)

b1198 PKGBUILD


#!/usr/bin/env -S sh -c 'nvchecker -cnvchecker.toml --logger=json | jq -r '\''.version | sub("^v"; "") | split("-") | .[-1]'\'' | xargs -i{} sed -i "s/^\\(pkgver=\\).*/\\1{}/" $0'
# shellcheck shell=bash disable=SC2034,SC2154
# ex: nowrap
# Maintainer: Wu Zhenyu <wuzhenyu@ustc.edu>
# NOTE: the shebang above is a self-update trick — executing this file runs
# nvchecker and sed's the current upstream tag into the pkgver= line below.
# Upstream source directory name; the release tarball extracts to
# "$_pkgname-$pkgver".
_pkgname=llama.cpp
pkgbase=llama-cpp
# Split package: plain CPU/BLAS build plus CUDA and OpenCL variants.
pkgname=("$pkgbase" "$pkgbase-cuda" "$pkgbase-opencl")
# Upstream release tag (rewritten in place by the nvchecker shebang).
pkgver=b1198
pkgrel=1
pkgdesc="Port of Facebook's LLaMA model in C/C++"
arch=(x86 x86_64 arm aarch64)
url=https://github.com/ggerganov/llama.cpp
depends=(openmpi python-numpy python-sentencepiece)
# makedepends covers all three split packages (oneAPI/MKL for x86 BLAS,
# cuda for the -cuda variant, clblast for the -opencl variant).
makedepends=(cmake intel-oneapi-dpcpp-cpp cuda intel-oneapi-mkl clblast)
license=(GPL3)
# Release-tag tarball (not a commit snapshot), so the checksum is stable.
source=("$url/archive/refs/tags/$pkgver.tar.gz")
sha256sums=('1c9494b2d98f6f32942f5b5ee1b59260384ab9fcc0a12867b23544e08f64bd1b')

# Configure and compile one build tree.
#   $1   - build directory name (e.g. "build", "build-cuda")
#   $2.. - extra -D options forwarded verbatim to cmake
# Runs inside the unpacked source dir; the cd persists for _package.
_build() {
    cd "$_pkgname-$pkgver" || return 1
    # https://github.com/ggerganov/llama.cpp/pull/2277
    sed -i 's/NOT DepBLAS/NOT DepBLAS_FOUND/' CMakeLists.txt
    # Quote "$1" and use "${@:2}" so the build dir and every extra flag
    # survive word-splitting intact (the old ${*:2:$#} re-split them).
    cmake -B "$1" -DCMAKE_INSTALL_PREFIX=/usr -DLLAMA_MPI=ON -DBUILD_SHARED_LIBS=ON \
        "${@:2}"
    cmake --build "$1"
}

# Install the build tree named by $1 into $pkgdir, then rename upstream's
# generically-named binaries (main, server) so they don't collide with
# other packages' files in /usr/bin.
_package() {
    # Quote every path expansion: $pkgdir and $1 must not be word-split.
    DESTDIR="$pkgdir" cmake --install "$1"
    mv "$pkgdir/usr/bin/main" "$pkgdir/usr/bin/llama"
    mv "$pkgdir/usr/bin/server" "$pkgdir/usr/bin/llama-server"
}

package_llama-cpp() {
    # CPU build: Intel MKL (ILP model per word size) on x86 machines,
    # OpenBLAS on everything else (arm/aarch64).
    local _machine
    _machine="$(uname -m)"
    # x86_64 must precede the x86* arm of the case — both patterns match it.
    case "$_machine" in
        x86_64)
            depends+=(intel-oneapi-mkl)
            _build build -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=/opt/intel/oneapi/compiler/latest/linux/bin/icx -DCMAKE_CXX_COMPILER=/opt/intel/oneapi/compiler/latest/linux/bin/icpx
            ;;
        x86*)
            depends+=(intel-oneapi-mkl)
            _build build -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_32 -DCMAKE_C_COMPILER=/opt/intel/oneapi/compiler/latest/linux/bin/icx -DCMAKE_CXX_COMPILER=/opt/intel/oneapi/compiler/latest/linux/bin/icpx
            ;;
        *)
            depends+=(openblas)
            _build build -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS
            ;;
    esac
    _package build
}

package_llama-cpp-cuda() {
    # CUDA-accelerated variant; stands in for (and excludes) plain llama-cpp.
    pkgdesc+=" (with CUDA)"
    provides=(llama-cpp)
    conflicts=(llama-cpp)
    depends+=(cuda)

    _build build-cuda -DLLAMA_CUBLAS=ON
    _package build-cuda
}

package_llama-cpp-opencl() {
    # OpenCL (CLBlast) variant; stands in for (and excludes) plain llama-cpp.
    pkgdesc+=" (with OpenCL)"
    provides=(llama-cpp)
    conflicts=(llama-cpp)
    depends+=(clblast)

    _build build-opencl -DLLAMA_CLBLAST=ON
    _package build-opencl
}

colobas commented on 2023-09-01 18:09 (UTC)

I used the following patch to get this to build. Using release tags as pkgver.

From 6e47ffdc7baf6fa60fad2d9b3f9b8dc29b3d3ee1 Mon Sep 17 00:00:00 2001
From: Guilherme Pires <guilherme.pires@alleninstitute.org>
Date: Fri, 1 Sep 2023 11:01:50 -0700
Subject: [PATCH] use tags for versioning

---
 PKGBUILD | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/PKGBUILD b/PKGBUILD
index fd6d00f..8d867ec 100755
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -5,7 +5,7 @@
 _pkgname=llama.cpp
 pkgbase=llama-cpp
 pkgname=("$pkgbase" "$pkgbase-cuda" "$pkgbase-opencl")
-pkgver=c3e53b4
+pkgver=b1147
 pkgrel=1
 pkgdesc="Port of Facebook's LLaMA model in C/C++"
 arch=(x86 x86_64 arm aarch64)
@@ -13,11 +13,11 @@ url=https://github.com/ggerganov/llama.cpp
 depends=(openmpi python-numpy python-sentencepiece)
 makedepends=(cmake intel-oneapi-dpcpp-cpp cuda intel-oneapi-mkl clblast)
 license=(GPL3)
-source=("$url/archive/master-$pkgver.tar.gz")
-sha256sums=('7bf8a74bd3393b2c96abca17099487dccdd114c6bb5bb59b70daf02efe437606')
+source=("$url/archive/refs/tags/$pkgver.tar.gz")
+sha256sums=('d6e0fbd1e21ca27aef90e71ad62d45ae16696483c4183fa1cfad9deb0da5abec')

 _build() {
-   cd "$_pkgname-master-$pkgver" || return 1
+   cd "$_pkgname-$pkgver" || return 1

    # https://github.com/ggerganov/llama.cpp/pull/2277
    sed -i 's/NOT DepBLAS/NOT DepBLAS_FOUND/' CMakeLists.txt
-- 
2.42.0

sunng commented on 2023-08-07 03:35 (UTC)

Is cuda required for this opencl package?