author     Alexander F. Rødseth   2024-02-06 18:43:17 +0100
committer  Alexander F. Rødseth   2024-02-06 18:43:17 +0100
commit     e5c7bf3987de98333e60ea5a1080114573546cc4 (patch)
tree       a32e9ca5ec3f7ca1444a3d64783f318d0e1be182
parent     7f8b787e9c681104ceb9a4178067dbc72e0ae074 (diff)
download   aur-e5c7bf3987de98333e60ea5a1080114573546cc4.tar.gz
upgpkg: 0.1.23-1
-rw-r--r--  .SRCINFO    8
-rw-r--r--  PKGBUILD   13
2 files changed, 12 insertions, 9 deletions
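
The .SRCINFO diff below simply mirrors the PKGBUILD bump: in the usual AUR workflow the file is regenerated from the updated PKGBUILD rather than edited by hand. A minimal sketch of that step, not part of this commit:

  # regenerate .SRCINFO from the updated PKGBUILD before committing
  makepkg --printsrcinfo > .SRCINFO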
diff --git a/.SRCINFO b/.SRCINFO
index 9bb4128ae201..3200bac40f41 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,7 +1,7 @@
pkgbase = ollama-cuda
pkgdesc = Create, run and share large language models (LLMs) with CUDA
- pkgver = 0.1.22
- pkgrel = 2
+ pkgver = 0.1.23
+ pkgrel = 1
url = https://github.com/jmorganca/ollama
arch = x86_64
license = MIT
@@ -11,8 +11,8 @@ pkgbase = ollama-cuda
makedepends = go
provides = ollama
conflicts = ollama
- source = git+https://github.com/jmorganca/ollama#tag=v0.1.22
- source = llama.cpp::git+https://github.com/ggerganov/llama.cpp#commit=cd4fddb29f81d6a1f6d51a0c016bc6b486d68def
+ source = git+https://github.com/jmorganca/ollama#tag=v0.1.23
+ source = llama.cpp::git+https://github.com/ggerganov/llama.cpp#commit=d2f650cb5b04ee2726663e79b47da5efe196ce00
source = sysusers.conf
source = tmpfiles.d
source = ollama.service
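
Both source entries pin exact upstream revisions (a tag for ollama, a commit for llama.cpp), so a quick pre-bump sanity check is to confirm that the new tag actually exists upstream. A hedged sketch using plain git, not part of this commit:

  # confirm the new ollama tag is published before bumping pkgver
  git ls-remote https://github.com/jmorganca/ollama refs/tags/v0.1.23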
diff --git a/PKGBUILD b/PKGBUILD
index 2857a5396f3b..b11578cd8af3 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -3,15 +3,15 @@
pkgname=ollama-cuda
pkgdesc='Create, run and share large language models (LLMs) with CUDA'
-pkgver=0.1.22
-pkgrel=2
+pkgver=0.1.23
+pkgrel=1
arch=(x86_64)
url='https://github.com/jmorganca/ollama'
license=(MIT)
-_ollamacommit=a47d8b2557259ffc9881817df97fbf6d6824e89e # tag: v0.1.22
+_ollamacommit=09a6f76f4c30fb8a9708680c519d08feeb504197 # tag: v0.1.23
# The llama.cpp git submodule commit hash can be found here:
-# https://github.com/jmorganca/ollama/tree/v0.1.22/llm
-_llama_cpp_commit=cd4fddb29f81d6a1f6d51a0c016bc6b486d68def
+# https://github.com/jmorganca/ollama/tree/v0.1.23/llm
+_llama_cpp_commit=d2f650cb5b04ee2726663e79b47da5efe196ce00
makedepends=(cmake cuda git go)
provides=(ollama)
conflicts=(ollama)
@@ -36,6 +36,9 @@ prepare() {
# Turn LTO on and set the build type to Release
sed -i 's,T_CODE=on,T_CODE=on -D LLAMA_LTO=on -D CMAKE_BUILD_TYPE=Release,g' llm/generate/gen_linux.sh
+
+ # Display a more helpful error message
+ sed -i "s|could not connect to ollama server, run 'ollama serve' to start it|ollama is not running, try 'systemctl start ollama'|g" cmd/cmd.go
}
build() {
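
The new _llama_cpp_commit has to match the submodule revision pinned by the ollama v0.1.23 tag. Besides the GitHub tree URL cited in the PKGBUILD comment above, the hash can be read from a local clone; the sketch below assumes such a clone sits in ./ollama, which is not part of this commit:

  # print the gitlink (submodule commit) recorded for llm/llama.cpp at tag v0.1.23
  git -C ollama ls-tree v0.1.23 llm/llama.cpp

The third column of the output is the hash to paste into _llama_cpp_commit; a final makepkg run (e.g. makepkg -srci) then confirms that the two pinned sources still build together.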