author     SR_team    2024-02-23 01:21:29 +0200
committer  SR_team    2024-02-23 01:21:29 +0200
commit     f53a39c4d115066ee7f65327a397004a0b1a2a6f
tree       50782a447890188590c7d383ab66bfdf34b427c7
parent     11935ad42633e1994199ad0b7e8de49804721996
download   aur-f53a39c4d115066ee7f65327a397004a0b1a2a6f.tar.gz
Modify the `extra/ollama-cuda` package to build Ollama from the latest git commit
-rw-r--r--  .SRCINFO  | 11
-rw-r--r--  PKGBUILD  | 38
2 files changed, 24 insertions(+), 25 deletions(-)
diff --git a/.SRCINFO b/.SRCINFO
index bf82c500e7c4..803df352cb93 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
-pkgbase = ollama-cuda
+pkgbase = ollama-cuda-git
pkgdesc = Create, run and share large language models (LLMs) with CUDA
- pkgver = 0.1.25
+ pkgver = 0.1.27.g275ea015
pkgrel = 1
url = https://github.com/jmorganca/ollama
arch = x86_64
@@ -11,15 +11,14 @@ pkgbase = ollama-cuda
makedepends = go
provides = ollama
conflicts = ollama
- source = git+https://github.com/jmorganca/ollama#tag=v0.1.25
- source = llama.cpp::git+https://github.com/ggerganov/llama.cpp#commit=6c00a066928b0475b865a2e3e709e2166e02d548
+ conflicts = ollama-cuda
+ source = git+https://github.com/jmorganca/ollama
source = ollama.service
source = sysusers.conf
source = tmpfiles.d
b2sums = SKIP
- b2sums = SKIP
b2sums = a773bbf16cf5ccc2ee505ad77c3f9275346ddf412be283cfeaee7c2e4c41b8637a31aaff8766ed769524ebddc0c03cf924724452639b62208e578d98b9176124
b2sums = 3aabf135c4f18e1ad745ae8800db782b25b15305dfeaaa031b4501408ab7e7d01f66e8ebb5be59fc813cfbff6788d08d2e48dcf24ecc480a40ec9db8dbce9fec
b2sums = e8f2b19e2474f30a4f984b45787950012668bf0acb5ad1ebb25cd9776925ab4a6aa927f8131ed53e35b1c71b32c504c700fe5b5145ecd25c7a8284373bb951ed
-pkgname = ollama-cuda
+pkgname = ollama-cuda-git
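
On the AUR, .SRCINFO is normally regenerated from the PKGBUILD rather than edited by hand, so the hunk above simply mirrors the PKGBUILD changes that follow:

    makepkg --printsrcinfo > .SRCINFO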
diff --git a/PKGBUILD b/PKGBUILD
index 1eaf4f242ee5..bccf3d688019 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -1,38 +1,38 @@
# Maintainer: Alexander F. Rødseth <xyproto@archlinux.org>
# Contributor: Matt Harrison <matt@harrison.us.com>
-pkgname=ollama-cuda
+pkgname=ollama-cuda-git
pkgdesc='Create, run and share large language models (LLMs) with CUDA'
-pkgver=0.1.25
+pkgver=0.1.27.g275ea015
pkgrel=1
arch=(x86_64)
url='https://github.com/jmorganca/ollama'
license=(MIT)
-_ollamacommit=42e77e2a699ab0eb2f27fe8cde6f4b7f6eef225a # tag: v0.1.25
-# The llama.cpp git submodule commit hash can be found here:
-# https://github.com/jmorganca/ollama/tree/v0.1.25/llm
-_llama_cpp_commit=6c00a066928b0475b865a2e3e709e2166e02d548
makedepends=(cmake cuda git go)
provides=(ollama)
-conflicts=(ollama)
-source=(git+$url#tag=v$pkgver
- llama.cpp::git+https://github.com/ggerganov/llama.cpp#commit=$_llama_cpp_commit
+conflicts=(ollama ollama-cuda)
+source=(git+$url
ollama.service
sysusers.conf
tmpfiles.d)
b2sums=('SKIP'
- 'SKIP'
'a773bbf16cf5ccc2ee505ad77c3f9275346ddf412be283cfeaee7c2e4c41b8637a31aaff8766ed769524ebddc0c03cf924724452639b62208e578d98b9176124'
'3aabf135c4f18e1ad745ae8800db782b25b15305dfeaaa031b4501408ab7e7d01f66e8ebb5be59fc813cfbff6788d08d2e48dcf24ecc480a40ec9db8dbce9fec'
'e8f2b19e2474f30a4f984b45787950012668bf0acb5ad1ebb25cd9776925ab4a6aa927f8131ed53e35b1c71b32c504c700fe5b5145ecd25c7a8284373bb951ed')
-prepare() {
- cd ${pkgname/-cuda}
+pkgver() {
+ cd ${pkgname/-cuda-git}
+
+ local _tag=$(git describe --tags --abbrev=0 | sed "s/^v//")
+ local _commit=$(git describe --abbrev=8 --always)
+ echo "$_tag.g$_commit"
+}
- rm -frv llm/llama.cpp
+prepare() {
+ cd ${pkgname/-cuda-git}
- # Copy git submodule files instead of symlinking because the build process is sensitive to symlinks.
- cp -r "$srcdir/llama.cpp" llm/llama.cpp
+ # Clone submodules (llama.cpp)
+ git submodule update --init --recursive
# Turn LTO on and set the build type to Release
sed -i 's,T_CODE=on,T_CODE=on -D LLAMA_LTO=on -D CMAKE_BUILD_TYPE=Release,g' llm/generate/gen_linux.sh
@@ -42,7 +42,7 @@ prepare() {
}
build() {
- cd ${pkgname/-cuda}
+ cd ${pkgname/-cuda-git}
export CGO_CFLAGS="$CFLAGS" CGO_CPPFLAGS="$CPPFLAGS" CGO_CXXFLAGS="$CXXFLAGS" CGO_LDFLAGS="$LDFLAGS"
go generate ./...
go build -buildmode=pie -trimpath -mod=readonly -modcacherw -ldflags=-linkmode=external \
@@ -50,16 +50,16 @@ build() {
}
check() {
- cd ${pkgname/-cuda}
+ cd ${pkgname/-cuda-git}
go test ./api ./format
./ollama --version > /dev/null
}
package() {
- install -Dm755 ${pkgname/-cuda}/${pkgname/-cuda} "$pkgdir/usr/bin/${pkgname/-cuda}"
+ install -Dm755 ${pkgname/-cuda-git}/${pkgname/-cuda-git} "$pkgdir/usr/bin/${pkgname/-cuda-git}"
install -dm755 "$pkgdir/var/lib/ollama"
install -Dm644 ollama.service "$pkgdir/usr/lib/systemd/system/ollama.service"
install -Dm644 sysusers.conf "$pkgdir/usr/lib/sysusers.d/ollama.conf"
install -Dm644 tmpfiles.d "$pkgdir/usr/lib/tmpfiles.d/ollama.conf"
- install -Dm644 ${pkgname/-cuda}/LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
+ install -Dm644 ${pkgname/-cuda-git}/LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
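
A short build-and-install sketch for the result, assuming the package is published on the AUR under the new pkgbase ollama-cuda-git (the repository name is an assumption; this commit does not state it):

    git clone https://aur.archlinux.org/ollama-cuda-git.git   # assumed AUR repo name
    cd ollama-cuda-git
    makepkg -si   # makepkg re-runs pkgver(), so each build picks up the latest upstream commit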