summary log tree commit diff stats
diff options
context:
space:
mode:
author    git user 2024-04-17 17:09:27 +0200
committer git user 2024-04-17 17:09:27 +0200
commit    a7dc37fcaf4ff2b8b21100e83bf03891f2e3bb37 (patch)
tree      034f22f5630dc57876fe23fa725c857c92829ac5
download  aur-a7dc37fcaf4ff2b8b21100e83bf03891f2e3bb37.tar.gz
Initial Commit.
-rw-r--r--  .SRCINFO                              72
-rw-r--r--  PKGBUILD                              418
-rw-r--r--  disable-rocm-cuda.gen_linux.sh.patch  117
-rw-r--r--  ollama.service                        20
-rw-r--r--  sysusers.conf                         2
-rw-r--r--  tmpfiles.d                            1
6 files changed, 630 insertions, 0 deletions
diff --git a/.SRCINFO b/.SRCINFO
new file mode 100644
index 000000000000..2f08a4c4cc01
--- /dev/null
+++ b/.SRCINFO
@@ -0,0 +1,72 @@
+pkgbase = ollama-nogpu-git
+ pkgdesc = Create, run and share large language models (LLMs). Package(s) without dedicated GPU offloading (no CUDA, no ROCm, no SYCL).
+ pkgver = 0.1.32+1.r2377.20240416.9df6c85c
+ pkgrel = 1
+ url = https://github.com/jmorganca/ollama
+ arch = armv7h
+ arch = aarch64
+ arch = i686
+ arch = x86_64
+ license = MIT
+ makedepends = bash
+ makedepends = cmake
+ makedepends = git
+ makedepends = go
+ makedepends = openmpi
+ makedepends = openblas
+ depends = gcc-libs
+ depends = glibc
+ depends = openssl
+ options = emptydirs
+ source = ollama::git+https://github.com/jmorganca/ollama.git
+ source = llama.cpp::git+https://github.com/ggerganov/llama.cpp.git
+ source = disable-rocm-cuda.gen_linux.sh.patch
+ source = ollama.service
+ source = sysusers.conf
+ source = tmpfiles.d
+ b2sums = SKIP
+ b2sums = SKIP
+ b2sums = 490289e7afe8720792a7890636737147041a1f60d91f70f1b39a758f75e00ebc21724a1beb4d6ec74b0ff868636c887953f50fffbb998edc56ab087fe9477bbb
+ b2sums = a773bbf16cf5ccc2ee505ad77c3f9275346ddf412be283cfeaee7c2e4c41b8637a31aaff8766ed769524ebddc0c03cf924724452639b62208e578d98b9176124
+ b2sums = 3aabf135c4f18e1ad745ae8800db782b25b15305dfeaaa031b4501408ab7e7d01f66e8ebb5be59fc813cfbff6788d08d2e48dcf24ecc480a40ec9db8dbce9fec
+ b2sums = e8f2b19e2474f30a4f984b45787950012668bf0acb5ad1ebb25cd9776925ab4a6aa927f8131ed53e35b1c71b32c504c700fe5b5145ecd25c7a8284373bb951ed
+
+pkgname = ollama-generic-git
+ pkgdesc = Create, run and share large language models (LLMs). CPU optimisation only.
+ depends = gcc-libs
+ depends = glibc
+ depends = openssl
+ optdepends =
+ provides = ollama=0.1.32+1.r2377.20240416.9df6c85c
+ provides = ollama-git=0.1.32+1.r2377.20240416.9df6c85c
+ conflicts = ollama
+
+pkgname = ollama-openmpi-git
+ pkgdesc = Create, run and share large language models (LLMs). CPU optimisation with openMPI.
+ depends = gcc-libs
+ depends = glibc
+ depends = openssl
+ optdepends =
+ provides = ollama=0.1.32+1.r2377.20240416.9df6c85c
+ provides = ollama-git=0.1.32+1.r2377.20240416.9df6c85c
+ conflicts = ollama
+
+pkgname = ollama-openblas-git
+ pkgdesc = Create, run and share large language models (LLMs). CPU optimisation with openblas.
+ depends = gcc-libs
+ depends = glibc
+ depends = openssl
+ optdepends =
+ provides = ollama=0.1.32+1.r2377.20240416.9df6c85c
+ provides = ollama-git=0.1.32+1.r2377.20240416.9df6c85c
+ conflicts = ollama
+
+pkgname = ollama-vulkan-git
+ pkgdesc = Create, run and share large language models (LLMs). With vulkan backend.
+ depends = gcc-libs
+ depends = glibc
+ depends = openssl
+ optdepends =
+ provides = ollama=0.1.32+1.r2377.20240416.9df6c85c
+ provides = ollama-git=0.1.32+1.r2377.20240416.9df6c85c
+ conflicts = ollama
diff --git a/PKGBUILD b/PKGBUILD
new file mode 100644
index 000000000000..31e7b8f94260
--- /dev/null
+++ b/PKGBUILD
@@ -0,0 +1,418 @@
+# Maintainer: dreieck
+# Contributor: Alexander F. Rødseth <xyproto@archlinux.org>
+# Contributor: Matt Harrison <matt@harrison.us.com>
+
+## `PKGBUILD` based on the `PKGBUILD` for the package `ollama-rocm-git`.
+
+_build_generic=true
+_build_openmpi=true
+_build_openblas=true
+_build_clblas=false # 2024-04-17: Fails to link with
+ # ```
+ # /usr/bin/ld: /var/cache/makepkg/build/ollama-git/src/ollama-clblas/llm/build/linux/x86_64_static/libllama.a(ggml-opencl.cpp.o): in function `ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer*, ggml_tensor const*, void*, unsigned long, unsigned long)':
+ # ggml-opencl.cpp:(.text+0x46): undefined reference to `clEnqueueReadBuffer'
+ # ```
+ # and more errors.
+_build_vulkan=true
+
+_name="ollama"
+_pkgbase="${_name}-nogpu"
+pkgbase="${_pkgbase}-git"
+pkgname=()
+if "${_build_generic}"; then
+ pkgname+=("${_name}-generic-git")
+fi
+if "${_build_openmpi}"; then
+ pkgname+=("${_name}-openmpi-git")
+fi
+if "${_build_openblas}"; then
+ pkgname+=("${_name}-openblas-git")
+fi
+if "${_build_clblas}"; then
+ pkgname+=("${_name}-clblas-git")
+fi
+if "${_build_vulkan}"; then
+ pkgname+=("${_name}-vulkan-git")
+fi
+pkgdesc='Create, run and share large language models (LLMs). Package(s) without dedicated GPU offloading (no CUDA, no ROCm, no SYCL).'
+pkgver=0.1.32+1.r2377.20240416.9df6c85c
+pkgrel=1
+arch=(
+ 'armv7h'
+ 'aarch64'
+ 'i686'
+ 'x86_64'
+)
+url='https://github.com/jmorganca/ollama'
+license=(
+ "MIT"
+)
+depends=(
+ 'gcc-libs'
+ 'glibc'
+ 'openssl'
+)
+makedepends=(
+ "bash"
+ "cmake"
+ "git"
+ "go"
+)
+if "${_build_openmpi}"; then
+ makedepends+=("openmpi")
+fi
+if "${_build_openblas}"; then
+ makedepends+=("openblas")
+fi
+if "${_build_clblas}"; then
+ makedepends+=("clblast")
+fi
+if "${_build_vulkan}"; then
+ makedepends+=()
+fi
+source=(
+ "${_name}::git+${url}.git"
+ "llama.cpp::git+https://github.com/ggerganov/llama.cpp.git" # Submodule
+ 'disable-rocm-cuda.gen_linux.sh.patch'
+ "ollama.service"
+ "sysusers.conf"
+ "tmpfiles.d"
+)
+b2sums=(
+ 'SKIP'
+ 'SKIP'
+ '490289e7afe8720792a7890636737147041a1f60d91f70f1b39a758f75e00ebc21724a1beb4d6ec74b0ff868636c887953f50fffbb998edc56ab087fe9477bbb'
+ 'a773bbf16cf5ccc2ee505ad77c3f9275346ddf412be283cfeaee7c2e4c41b8637a31aaff8766ed769524ebddc0c03cf924724452639b62208e578d98b9176124'
+ '3aabf135c4f18e1ad745ae8800db782b25b15305dfeaaa031b4501408ab7e7d01f66e8ebb5be59fc813cfbff6788d08d2e48dcf24ecc480a40ec9db8dbce9fec'
+ 'e8f2b19e2474f30a4f984b45787950012668bf0acb5ad1ebb25cd9776925ab4a6aa927f8131ed53e35b1c71b32c504c700fe5b5145ecd25c7a8284373bb951ed'
+)
+options+=('emptydirs')
+#options+=('!lto') # openmpi variant fails to link without LTO.
+
+_check_cpufeature() {
+ ## Checks if the host CPU supports the feature passed as argument "$1".
+ # If yes, return "ON" to stdout.
+ # If not yes, return "OFF" to stdout.
+
+ if grep -qE "\<$1" /proc/cpuinfo; then
+ printf '%s' 'ON'
+ else
+ printf '%s' 'OFF'
+ fi
+}
+
+_check_makepkgpotion() {
+ ## Checks if the given makepkg option is set in `options`.
+ # If yes, return "ON" to stdout.
+ # If not yes, return "OFF" to stdout.
+
+ _checkfor="$1"
+ _result='OFF'
+ for _option in "${options[@]}"; do
+ if grep -qw "${_checkfor}" <<<"${_option}"; then
+ _result="ON"
+ fi
+ if grep -qw "\!${_checkfor}" <<<"${_option}" 2>/dev/null; then
+ _result="OFF"
+ fi
+ done
+ echo "${_result}"
+}
+
+#-DBUILD_SHARED_LIBS=ON
+#-DLLAMA_STATIC=ON
+#-DLLAMA_QKK_64=ON
+_cmake_options_common="
+ -DBUILD_TESTING=ON
+ -DCMAKE_BUILD_TYPE=Release
+ -DCMAKE_INSTALL_PREFIX=/usr
+ -DLLAMA_ACCELERATE=ON
+ -DLLAMA_ALL_WARNINGS=OFF
+ -DLLAMA_ALL_WARNINGS_3RD_PARTY=OFF
+ -DLLAMA_FATAL_WARNINGS=OFF
+ -DLLAMA_AVX="$(_check_cpufeature avx)" -DLLAMA_AVX2="$(_check_cpufeature avx2)" -DLLAMA_AVX512="$(_check_cpufeature avx512)" -DLLAMA_AVX512_VBMI="$(_check_cpufeature avx512vbmi)" -DLLAMA_AVX512_VNNI="$(_check_cpufeature avx512_vnni)" -DLLAMA_F16C="$(_check_cpufeature f16c)" -DLLAMA_FMA="$(_check_cpufeature fma)"
+ -DLLAMA_BUILD_EXAMPLES=ON -DLLAMA_BUILD_SERVER=ON -DLLAMA_BUILD_TESTS=ON
+ -DLLAMA_CPU_HBM=OFF -DLLAMA_CUBLAS=OFF -DLLAMA_CUDA=OFF -DLLAMA_HIPBLAS=OFF -DLLAMA_HIP_UMA=OFF -DLLAMA_METAL=OFF -DLLAMA_SYCL=OFF -DLLAMA_KOMPUTE=OFF
+ -DLLAMA_LTO="$(_check_makepkgpotion lto)"
+ -DLLAMA_GPROF=OFF -DLLAMA_PERF=OFF -DLLAMA_SANITIZE_ADDRESS=OFF -DLLAMA_SANITIZE_THREAD=OFF -DLLAMA_SANITIZE_UNDEFINED=OFF
+ -DLLAMA_SERVER_SSL=ON -DLLAMA_SERVER_VERBOSE=ON
+"
+_cmake_options_blas="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=openblas"
+_cmake_options_mpi="-DLLAMA_MPI=ON"
+_cmake_options_clblas="-DLLAMA_CLBLAST=ON"
+_cmake_options_vulkan="-DLLAMA_VULKAN=ON -DLLAMA_VULKAN_CHECK_RESULTS=ON -DLLAMA_VULKAN_DEBUG=OFF -DLLAMA_VULKAN_RUN_TESTS=ON -DLLAMA_VULKAN_VALIDATE=OFF"
+
+prepare() {
+ export GOPATH="${srcdir}/go"
+ cd "${srcdir}/${_name}"
+
+ # Add submodules
+ git submodule init
+ git config --local submodule.llama.cpp.url "${srcdir}/llama.cpp"
+ git -c protocol.file.allow=always submodule update
+
+ for _patch in "${srcdir}/disable-rocm-cuda.gen_linux.sh.patch"; do
+ printf '%s\n' " > Applying patch $(basename "${_patch}") ..."
+ patch -Np1 --follow-symlinks -i "${_patch}"
+ done
+
+ # Generate git logfile for later installation into the documentation directory
+ git log > git.log
+
+ # Turn LTO on and set the build type to Release
+ #sed -i 's,T_CODE=on,T_CODE=on -D LLAMA_LTO=on -D CMAKE_BUILD_TYPE=Release,g' llm/generate/gen_linux.sh
+
+ # Display a more helpful error message
+ sed -i "s|could not connect to ollama server, run 'ollama serve' to start it|ollama is not running, try 'systemctl start ollama' or 'ollama serve'|g" cmd/cmd.go
+
+ printf '%s\n' " > Downloading go dependencies ..."
+ go get
+
+ for _variant in "${pkgname[@]}"; do
+ _variant="${_variant%-git}"
+ printf '%s\n' " > Creating source directory for ${_variant} ..."
+ cp -r "${srcdir}/${_name}" "${srcdir}/${_variant}"
+ done
+}
+
+pkgver() {
+ cd "${srcdir}/${_name}"
+
+ _ver="$(git describe --tags | sed -E -e 's|^[vV]||' -e 's|\-g[0-9a-f]*$||' | tr '-' '+')"
+ _rev="$(git rev-list --count HEAD)"
+ _date="$(git log -1 --date=format:"%Y%m%d" --format="%ad")"
+ _hash="$(git rev-parse --short HEAD)"
+
+ if [ -z "${_ver}" ]; then
+ error "Could not determine version."
+ return 1
+ else
+ printf '%s' "${_ver}.r${_rev}.${_date}.${_hash}"
+ fi
+}
+
+build() {
+ export GOPATH="${srcdir}/go"
+ #export CFLAGS="-march=native -mtune=generic -O2 -pipe -fno-plt"
+ #export CXXFLAGS="$CFLAGS"
+ #export CGO_CFLAGS="$CFLAGS" CGO_CPPFLAGS="$CPPFLAGS" CGO_CXXFLAGS="$CXXFLAGS" CGO_LDFLAGS="$LDFLAGS"
+
+
+ if "${_build_generic}"; then
+ cd "${srcdir}/ollama-generic"
+
+ export OLLAMA_CUSTOM_CPU_DEFS="${_cmake_options_common}"
+
+ printf '\n'
+ printf '%s\n' " > Compiling generic variant ..."
+ printf '\n'
+ go generate ./...
+ cp llm/build/linux/x86_64/cpu/libllama.a llm/build/linux/x86_64_static/
+ cp llm/build/linux/x86_64_static/libllama.so llm/build/linux/x86_64/cpu/
+ go build -buildmode=pie -trimpath -mod=readonly -modcacherw -ldflags=-linkmode=external -ldflags=-buildid='' -ldflags="-X=github.com/jmorganca/ollama/version.Version=${pkgver}"
+ fi
+
+
+ if "${_build_openmpi}"; then
+ cd "${srcdir}/ollama-openmpi"
+
+ export OLLAMA_CUSTOM_CPU_DEFS="${_cmake_options_common} ${_cmake_options_mpi}"
+
+ printf '\n'
+ printf '%s\n' " > Compiling OpenMPI variant ..."
+ printf '\n'
+ go generate ./...
+ cp llm/build/linux/x86_64/cpu/libllama.a llm/build/linux/x86_64_static/
+ cp llm/build/linux/x86_64_static/libllama.so llm/build/linux/x86_64/cpu/
+ go build -buildmode=pie -trimpath -mod=readonly -modcacherw -ldflags=-linkmode=external -ldflags=-buildid='' -ldflags="-X=github.com/jmorganca/ollama/version.Version=${pkgver}"
+ fi
+
+
+ if "${_build_openblas}"; then
+ cd "${srcdir}/ollama-openblas"
+
+ export OLLAMA_CUSTOM_CPU_DEFS="${_cmake_options_common} ${_cmake_options_blas}"
+
+ printf '\n'
+ printf '%s\n' " > Compiling openblas variant ..."
+ printf '\n'
+ go generate ./...
+ cp llm/build/linux/x86_64/cpu/libllama.a llm/build/linux/x86_64_static/
+ cp llm/build/linux/x86_64_static/libllama.so llm/build/linux/x86_64/cpu/
+ go build -buildmode=pie -trimpath -mod=readonly -modcacherw -ldflags=-linkmode=external -ldflags=-buildid='' -ldflags="-X=github.com/jmorganca/ollama/version.Version=${pkgver}"
+ fi
+
+
+ if "${_build_clblas}"; then
+ cd "${srcdir}/ollama-clblas"
+
+ export OLLAMA_CUSTOM_CPU_DEFS="${_cmake_options_common} ${_cmake_options_clblas}"
+
+ printf '\n'
+ printf '%s\n' " > Compiling clblas variant ..."
+ printf '\n'
+ go generate ./...
+ cp llm/build/linux/x86_64/cpu/libllama.a llm/build/linux/x86_64_static/
+ cp llm/build/linux/x86_64_static/libllama.so llm/build/linux/x86_64/cpu/
+ go build -buildmode=pie -trimpath -mod=readonly -modcacherw -ldflags=-linkmode=external -ldflags=-buildid='' -ldflags="-X=github.com/jmorganca/ollama/version.Version=${pkgver}"
+ fi
+
+
+ if "${_build_vulkan}"; then
+ cd "${srcdir}/ollama-vulkan"
+
+ export OLLAMA_CUSTOM_CPU_DEFS="${_cmake_options_common} ${_cmake_options_vulkan}"
+
+ printf '\n'
+ printf '%s\n' " > Compiling vulkan variant ..."
+ printf '\n'
+ go generate ./...
+ cp llm/build/linux/x86_64/cpu/libllama.a llm/build/linux/x86_64_static/
+ cp llm/build/linux/x86_64_static/libllama.so llm/build/linux/x86_64/cpu/
+ go build -buildmode=pie -trimpath -mod=readonly -modcacherw -ldflags=-linkmode=external -ldflags=-buildid='' -ldflags="-X=github.com/jmorganca/ollama/version.Version=${pkgver}"
+ fi
+}
+
+check() {
+ export GOPATH="${srcdir}/go"
+ for _variant in "${pkgname[@]}"; do
+ _variant="${_variant%-git}"
+ printf '\n'
+ printf '%s\n' " > Running tests for ${_variant} ..."
+ printf '\n'
+ cd "${srcdir}/${_variant}"
+ go test ./api ./format
+ ./ollama --version > /dev/null
+ done
+}
+
+package_ollama-generic-git() {
+ pkgdesc="Create, run and share large language models (LLMs). CPU optimisation only."
+ depends+=(
+ )
+ optdepends=()
+ provides=(
+ "${_name}=${pkgver}"
+ "${_name}-git=${pkgver}"
+ )
+ conflicts=(
+ "${_name}"
+ )
+ _variant="ollama-generic"
+ cd "${srcdir}/${_variant}"
+
+ install -Dvm755 -t "${pkgdir}/usr/bin" 'ollama'
+ install -dvm755 "${pkgdir}/var/lib/ollama"
+ install -Dvm644 -t "${pkgdir}/usr/lib/systemd/system" "${srcdir}/ollama.service"
+ install -Dvm644 "${srcdir}/sysusers.conf" "${pkgdir}/usr/lib/sysusers.d/ollama.conf"
+ install -Dvm644 "${srcdir}/tmpfiles.d" "${pkgdir}/usr/lib/tmpfiles.d/ollama.conf"
+
+ install -Dvm644 -t "${pkgdir}/usr/share/doc/${_name}" git.log README.md
+ install -Dvm644 -t "${pkgdir}/usr/share/licenses/${pkgname}" "LICENSE"
+ ln -svr "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE" "${pkgdir}/usr/share/doc/${_name}/LICENSE"
+}
+
+package_ollama-openmpi-git() {
+ pkgdesc="Create, run and share large language models (LLMs). CPU optimisation with openMPI."
+ depends+=(
+ )
+ optdepends=()
+ provides=(
+ "${_name}=${pkgver}"
+ "${_name}-git=${pkgver}"
+ )
+ conflicts=(
+ "${_name}"
+ )
+ _variant="ollama-openmpi"
+ cd "${srcdir}/${_variant}"
+
+ install -Dvm755 -t "${pkgdir}/usr/bin" 'ollama'
+ install -dvm755 "${pkgdir}/var/lib/ollama"
+ install -Dvm644 -t "${pkgdir}/usr/lib/systemd/system" "${srcdir}/ollama.service"
+ install -Dvm644 "${srcdir}/sysusers.conf" "${pkgdir}/usr/lib/sysusers.d/ollama.conf"
+ install -Dvm644 "${srcdir}/tmpfiles.d" "${pkgdir}/usr/lib/tmpfiles.d/ollama.conf"
+
+ install -Dvm644 -t "${pkgdir}/usr/share/doc/${_name}" git.log README.md
+ install -Dvm644 -t "${pkgdir}/usr/share/licenses/${pkgname}" "LICENSE"
+ ln -svr "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE" "${pkgdir}/usr/share/doc/${_name}/LICENSE"
+}
+
+package_ollama-openblas-git() {
+ pkgdesc="Create, run and share large language models (LLMs). CPU optimisation with openblas."
+ depends+=(
+ )
+ optdepends=()
+ provides=(
+ "${_name}=${pkgver}"
+ "${_name}-git=${pkgver}"
+ )
+ conflicts=(
+ "${_name}"
+ )
+ _variant="ollama-openblas"
+ cd "${srcdir}/${_variant}"
+
+ install -Dvm755 -t "${pkgdir}/usr/bin" 'ollama'
+ install -dvm755 "${pkgdir}/var/lib/ollama"
+ install -Dvm644 -t "${pkgdir}/usr/lib/systemd/system" "${srcdir}/ollama.service"
+ install -Dvm644 "${srcdir}/sysusers.conf" "${pkgdir}/usr/lib/sysusers.d/ollama.conf"
+ install -Dvm644 "${srcdir}/tmpfiles.d" "${pkgdir}/usr/lib/tmpfiles.d/ollama.conf"
+
+ install -Dvm644 -t "${pkgdir}/usr/share/doc/${_name}" git.log README.md
+ install -Dvm644 -t "${pkgdir}/usr/share/licenses/${pkgname}" "LICENSE"
+ ln -svr "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE" "${pkgdir}/usr/share/doc/${_name}/LICENSE"
+}
+
+package_ollama-clblas-git() {
+ pkgdesc="Create, run and share large language models (LLMs). With clblast backend."
+ depends+=(
+ 'clblast'
+ )
+ optdepends=()
+ provides=(
+ "${_name}=${pkgver}"
+ "${_name}-git=${pkgver}"
+ )
+ conflicts=(
+ "${_name}"
+ )
+ _variant="ollama-clblas"
+ cd "${srcdir}/${_variant}"
+
+ install -Dvm755 -t "${pkgdir}/usr/bin" 'ollama'
+ install -dvm755 "${pkgdir}/var/lib/ollama"
+ install -Dvm644 -t "${pkgdir}/usr/lib/systemd/system" "${srcdir}/ollama.service"
+ install -Dvm644 "${srcdir}/sysusers.conf" "${pkgdir}/usr/lib/sysusers.d/ollama.conf"
+ install -Dvm644 "${srcdir}/tmpfiles.d" "${pkgdir}/usr/lib/tmpfiles.d/ollama.conf"
+
+ install -Dvm644 -t "${pkgdir}/usr/share/doc/${_name}" git.log README.md
+ install -Dvm644 -t "${pkgdir}/usr/share/licenses/${pkgname}" "LICENSE"
+ ln -svr "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE" "${pkgdir}/usr/share/doc/${_name}/LICENSE"
+}
+
+package_ollama-vulkan-git() {
+ pkgdesc="Create, run and share large language models (LLMs). With vulkan backend."
+ depends+=(
+ )
+ optdepends=()
+ provides=(
+ "${_name}=${pkgver}"
+ "${_name}-git=${pkgver}"
+ )
+ conflicts=(
+ "${_name}"
+ )
+ _variant="ollama-vulkan"
+ cd "${srcdir}/${_variant}"
+
+ install -Dvm755 -t "${pkgdir}/usr/bin" 'ollama'
+ install -dvm755 "${pkgdir}/var/lib/ollama"
+ install -Dvm644 -t "${pkgdir}/usr/lib/systemd/system" "${srcdir}/ollama.service"
+ install -Dvm644 "${srcdir}/sysusers.conf" "${pkgdir}/usr/lib/sysusers.d/ollama.conf"
+ install -Dvm644 "${srcdir}/tmpfiles.d" "${pkgdir}/usr/lib/tmpfiles.d/ollama.conf"
+
+ install -Dvm644 -t "${pkgdir}/usr/share/doc/${_name}" git.log README.md
+ install -Dvm644 -t "${pkgdir}/usr/share/licenses/${pkgname}" "LICENSE"
+ ln -svr "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE" "${pkgdir}/usr/share/doc/${_name}/LICENSE"
+}
diff --git a/disable-rocm-cuda.gen_linux.sh.patch b/disable-rocm-cuda.gen_linux.sh.patch
new file mode 100644
index 000000000000..2c4a04015449
--- /dev/null
+++ b/disable-rocm-cuda.gen_linux.sh.patch
@@ -0,0 +1,117 @@
+diff -rU1 ollama.orig/llm/generate/gen_linux.sh ollama/llm/generate/gen_linux.sh
+--- ollama.orig/llm/generate/gen_linux.sh 2024-04-17 15:02:16.439575013 +0200
++++ ollama/llm/generate/gen_linux.sh 2024-04-17 15:16:15.656217455 +0200
+@@ -53,3 +53,3 @@
+ fi
+-COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off"
++COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=on"
+ source $(dirname $0)/gen_common.sh
+@@ -67,3 +67,3 @@
+ CMAKE_TARGETS="--target llama --target ggml"
+- CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
++ CMAKE_DEFS="-DBUILD_SHARED_LIBS=on -DLLAMA_NATIVE=on ${CMAKE_DEFS}"
+ BUILD_DIR="../build/linux/${ARCH}_static"
+@@ -95,3 +95,3 @@
+
+- COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off"
++ COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=on"
+ if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
+@@ -101,3 +101,3 @@
+ init_vars
+- CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
++ CMAKE_DEFS="${COMMON_CPU_DEFS} ${CMAKE_DEFS}"
+ BUILD_DIR="../build/linux/${ARCH}/cpu"
+@@ -143,64 +143,2 @@
+
+-# If needed, look for the default CUDA toolkit location
+-if [ -z "${CUDA_LIB_DIR}" ] && [ -d /usr/local/cuda/lib64 ]; then
+- CUDA_LIB_DIR=/usr/local/cuda/lib64
+-fi
+-
+-# If needed, look for CUDA on Arch Linux
+-if [ -z "${CUDA_LIB_DIR}" ] && [ -d /opt/cuda/targets/x86_64-linux/lib ]; then
+- CUDA_LIB_DIR=/opt/cuda/targets/x86_64-linux/lib
+-fi
+-
+-# Allow override in case libcudart is in the wrong place
+-if [ -z "${CUDART_LIB_DIR}" ]; then
+- CUDART_LIB_DIR="${CUDA_LIB_DIR}"
+-fi
+-
+-if [ -d "${CUDA_LIB_DIR}" ]; then
+- echo "CUDA libraries detected - building dynamic CUDA library"
+- init_vars
+- CUDA_MAJOR=$(ls "${CUDA_LIB_DIR}"/libcudart.so.* | head -1 | cut -f3 -d. || true)
+- if [ -n "${CUDA_MAJOR}" ]; then
+- CUDA_VARIANT=_v${CUDA_MAJOR}
+- fi
+- if [ "${ARCH}" == "arm64" ]; then
+- echo "ARM CPU detected - disabling unsupported AVX instructions"
+-
+- # ARM-based CPUs such as M1 and Tegra do not support AVX extensions.
+- #
+- # CUDA compute < 6.0 lacks proper FP16 support on ARM.
+- # Disabling has minimal performance effect while maintaining compatibility.
+- ARM64_DEFS="-DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_CUDA_F16=off"
+- fi
+- CMAKE_DEFS="-DLLAMA_CUDA=on -DLLAMA_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS}"
+- BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}"
+- EXTRA_LIBS="-L${CUDA_LIB_DIR} -lcudart -lcublas -lcublasLt -lcuda"
+- build
+-
+- # Carry the CUDA libs as payloads to help reduce dependency burden on users
+- #
+- # TODO - in the future we may shift to packaging these separately and conditionally
+- # downloading them in the install script.
+- DEPS="$(ldd ${BUILD_DIR}/bin/ollama_llama_server )"
+- for lib in libcudart.so libcublas.so libcublasLt.so ; do
+- DEP=$(echo "${DEPS}" | grep ${lib} | cut -f1 -d' ' | xargs || true)
+- if [ -n "${DEP}" -a -e "${CUDA_LIB_DIR}/${DEP}" ]; then
+- cp "${CUDA_LIB_DIR}/${DEP}" "${BUILD_DIR}/bin/"
+- elif [ -e "${CUDA_LIB_DIR}/${lib}.${CUDA_MAJOR}" ]; then
+- cp "${CUDA_LIB_DIR}/${lib}.${CUDA_MAJOR}" "${BUILD_DIR}/bin/"
+- elif [ -e "${CUDART_LIB_DIR}/${lib}" ]; then
+- cp -d ${CUDART_LIB_DIR}/${lib}* "${BUILD_DIR}/bin/"
+- else
+- cp -d "${CUDA_LIB_DIR}/${lib}*" "${BUILD_DIR}/bin/"
+- fi
+- done
+- compress
+-
+-fi
+-
+-if [ -z "${ROCM_PATH}" ]; then
+- # Try the default location in case it exists
+- ROCM_PATH=/opt/rocm
+-fi
+-
+ if [ -z "${CLBlast_DIR}" ]; then
+@@ -211,28 +149,2 @@
+ fi
+-
+-if [ -d "${ROCM_PATH}" ]; then
+- echo "ROCm libraries detected - building dynamic ROCm library"
+- if [ -f ${ROCM_PATH}/lib/librocblas.so.*.*.????? ]; then
+- ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. || true)
+- fi
+- init_vars
+- CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DLLAMA_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)"
+- BUILD_DIR="../build/linux/${ARCH}/rocm${ROCM_VARIANT}"
+- EXTRA_LIBS="-L${ROCM_PATH}/lib -L/opt/amdgpu/lib/x86_64-linux-gnu/ -Wl,-rpath,\$ORIGIN/../../rocm/ -lhipblas -lrocblas -lamdhip64 -lrocsolver -lamd_comgr -lhsa-runtime64 -lrocsparse -ldrm -ldrm_amdgpu"
+- build
+-
+- # Record the ROCM dependencies
+- rm -f "${BUILD_DIR}/bin/deps.txt"
+- touch "${BUILD_DIR}/bin/deps.txt"
+- for dep in $(ldd "${BUILD_DIR}/bin/ollama_llama_server" | grep "=>" | cut -f2 -d= | cut -f2 -d' ' | grep -e rocm -e amdgpu -e libtinfo ); do
+- echo "${dep}" >> "${BUILD_DIR}/bin/deps.txt"
+- done
+- # bomb out if for some reason we didn't get a few deps
+- if [ $(cat "${BUILD_DIR}/bin/deps.txt" | wc -l ) -lt 8 ] ; then
+- cat "${BUILD_DIR}/bin/deps.txt"
+- echo "ERROR: deps file short"
+- exit 1
+- fi
+- compress
+-fi
+
diff --git a/ollama.service b/ollama.service
new file mode 100644
index 000000000000..b1c1637afa2a
--- /dev/null
+++ b/ollama.service
@@ -0,0 +1,20 @@
+[Unit]
+Description=Ollama Service
+Wants=network-online.target
+After=network.target network-online.target
+
+[Service]
+ExecStart=/usr/bin/ollama serve
+WorkingDirectory=/var/lib/ollama
+Environment="HOME=/var/lib/ollama" "GIN_MODE=release"
+User=ollama
+Group=ollama
+Restart=on-failure
+RestartSec=3
+Type=simple
+PrivateTmp=yes
+ProtectSystem=full
+ProtectHome=yes
+
+[Install]
+WantedBy=multi-user.target
diff --git a/sysusers.conf b/sysusers.conf
new file mode 100644
index 000000000000..9cc5c5cd0c4a
--- /dev/null
+++ b/sysusers.conf
@@ -0,0 +1,2 @@
+g ollama - -
+u ollama - "ollama user" /var/lib/ollama
diff --git a/tmpfiles.d b/tmpfiles.d
new file mode 100644
index 000000000000..cb73c010eb33
--- /dev/null
+++ b/tmpfiles.d
@@ -0,0 +1 @@
+Q /var/lib/ollama 0755 ollama ollama