author    Felix Erkinger  2024-08-29 23:06:50 +0200
committer Felix Erkinger  2024-08-29 23:06:50 +0200
commit    cc0db27a5768d5d9b14192e1bfd8d033f88fcf10 (patch)
tree      5d7f70574d46b4e1cc63ae55352ce637b8e2a961
parent    4ace60946df577a8a451acc729a4a0d584cefe00 (diff)
download  aur-cc0db27a5768d5d9b14192e1bfd8d033f88fcf10.tar.gz
upgpkg: localai-git 2.20.1.37.gae6d3276-1, update python flavors
-rw-r--r--  .SRCINFO            72
-rw-r--r--  PKGBUILD           134
-rw-r--r--  backend-req.patch   32
-rw-r--r--  libbackend.patch    50
-rw-r--r--  localai.service     59
5 files changed, 169 insertions, 178 deletions
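To inspect this commit locally, the package repo can be cloned from the AUR in
the usual way (standard AUR tooling, not part of this change):

    git clone https://aur.archlinux.org/localai-git.git
    git -C localai-git checkout cc0db27a5768d5d9b14192e1bfd8d033f88fcf10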
diff --git a/.SRCINFO b/.SRCINFO
index 5ca5f0d7d32c..9d280bee20d1 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,6 +1,6 @@
pkgbase = localai-git
pkgdesc = Self-hosted OpenAI API alternative - Open Source, community-driven and local-first.
- pkgver = 2.20.0
+ pkgver = 2.20.1.37.gae6d3276
pkgrel = 1
url = https://github.com/mudler/LocalAI
arch = x86_64
@@ -13,9 +13,12 @@ pkgbase = localai-git
makedepends = blas-openblas
makedepends = sdl2
makedepends = ffmpeg
+ makedepends = upx
makedepends = protoc-gen-go
makedepends = protoc-gen-go-grpc
- makedepends = upx
+ makedepends = python-protobuf
+ makedepends = python-grpcio
+ makedepends = python-grpcio-tools
makedepends = onnxruntime
makedepends = libucd-git
makedepends = cuda
@@ -29,24 +32,12 @@ pkgbase = localai-git
depends = protobuf
depends = grpc
depends = espeak-ng
- depends = python-protobuf
- depends = python-grpcio
- depends = python-grpcio-tools
- depends = python-numpy
- depends = python-opencv
- depends = python-pillow
- depends = python-pytorch
- depends = python-torchaudio
- depends = python-torchvision
- depends = python-accelerate
provides = localai
- provides = local-ai=2.20.0
+ provides = local-ai=2.20.1.37.gae6d3276
conflicts = localai
conflicts = local-ai
backup = etc/localai/localai.conf
source = localai::git+https://github.com/mudler/LocalAI
- source = libbackend.patch
- source = backend-req.patch
source = README.md
source = localai.conf
source = localai.service
@@ -58,62 +49,75 @@ pkgbase = localai-git
sha256sums = SKIP
sha256sums = SKIP
sha256sums = SKIP
- sha256sums = SKIP
- sha256sums = SKIP
pkgname = localai-git
depends = protobuf
depends = grpc
depends = espeak-ng
+ depends = openblas
+ depends = onnxruntime
depends = python-protobuf
depends = python-grpcio
- depends = python-grpcio-tools
- depends = python-numpy
- depends = python-opencv
+ depends = python-certifi
depends = python-pillow
+ depends = python-opencv
+ depends = python-numpy
depends = python-pytorch
depends = python-torchaudio
depends = python-torchvision
+ depends = python-transformers
+ depends = python-sentencepiece
+ depends = python-peft
depends = python-accelerate
- depends = openblas
- depends = onnxruntime
+
+pkgname = localai-git-python
pkgname = localai-git-cuda
pkgdesc = Self-hosted OpenAI API alternative - Open Source, community-driven and local-first. (with CUDA support)
depends = protobuf
depends = grpc
depends = espeak-ng
+ depends = cuda
+ depends = onnxruntime
depends = python-protobuf
depends = python-grpcio
- depends = python-grpcio-tools
- depends = python-numpy
- depends = python-opencv
+ depends = python-certifi
depends = python-pillow
+ depends = python-opencv
+ depends = python-numpy
depends = python-pytorch
depends = python-torchaudio
depends = python-torchvision
+ depends = python-transformers
+ depends = python-sentencepiece
+ depends = python-peft
depends = python-accelerate
- depends = cuda
- depends = onnxruntime
depends = python-pytorch-cuda
+pkgname = localai-git-cuda-python
+
pkgname = localai-git-rocm
pkgdesc = Self-hosted OpenAI API alternative - Open Source, community-driven and local-first. (with ROCM support)
depends = protobuf
depends = grpc
depends = espeak-ng
+ depends = rocm-hip-runtime
+ depends = hipblas
+ depends = rocblas
+ depends = onnxruntime
depends = python-protobuf
depends = python-grpcio
- depends = python-grpcio-tools
- depends = python-numpy
- depends = python-opencv
+ depends = python-certifi
depends = python-pillow
+ depends = python-opencv
+ depends = python-numpy
depends = python-pytorch
depends = python-torchaudio
depends = python-torchvision
+ depends = python-transformers
+ depends = python-sentencepiece
+ depends = python-peft
depends = python-accelerate
- depends = rocm-hip-runtime
- depends = hipblas
- depends = rocblas
- depends = onnxruntime
depends = python-pytorch-rocm
+
+pkgname = localai-git-rocm-python
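Note: .SRCINFO is generated, not hand-edited; a bump like this one is typically
produced with the standard AUR workflow (the commands below are the usual
tooling, not part of the diff itself):

    # regenerate metadata after editing the PKGBUILD, then stage everything
    makepkg --printsrcinfo > .SRCINFO
    git add .SRCINFO PKGBUILD localai.service
    git rm backend-req.patch libbackend.patch
    git commit -m 'upgpkg: localai-git 2.20.1.37.gae6d3276-1, update python flavors'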
diff --git a/PKGBUILD b/PKGBUILD
index 551ab373b49f..29140abb3ea8 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -8,12 +8,16 @@ _ENABLE_ROCM=${_ENABLE_ROCM:-1}
# if set to 1 make is called with -j <physical cores> for parallel building, set to 0 for debug
_ENABLE_PARALLEL=${_ENABLE_PARALLEL:-1}
-# additional piper backend
+
+# additional backends
+# piper (text2speech) backend
_ENABLE_PIPER=${_ENABLE_PIPER:-1}
-# additional whisper backend
+# whisper (speech2text) backend
_ENABLE_WHISPER=${_ENABLE_WHISPER:-1}
-# additional python backends if set to 1
+# python backends, enables "-python" package flavors
_ENABLE_PYTHON=${_ENABLE_PYTHON:-1}
+# will be automatically set on "-python" package flavors
+_IS_PYTHON_FLAVOR=0
# if GPU_TARGETS and AMDGPU_TARGETS are not set, mirror architecture list from arch:python-pytorch@2.3.0-2
_AMDGPU_TARGETS="gfx906;gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102"
@@ -27,7 +31,7 @@ _OPTIONAL_MAKE_ARGS="${_OPTIONAL_MAKE_ARGS:-}"
# limit pulling external sources
_EXTERNAL_SOURCES="backend/cpp/llama/llama.cpp"
-# disabled_sources=go-llama.cpp gpt4all go-rwkv.cpp go-stable-diffusion go-tiny-dream go-bert go-piper whisper
+# disabled: go-llama.cpp gpt4all go-rwkv.cpp go-stable-diffusion go-tiny-dream go-bert go-piper whisper
_DISABLED_MOD_EDIT="nomic-ai/gpt4all/gpt4all mudler/go-stable-diffusion \
go-skynet/go-llama.cpp go-skynet/go-bert.cpp donomii/go-rwkv.cpp M0Rf30/go-tiny-dream"
@@ -47,7 +51,8 @@ else
_GO_TAGS=""
fi
-# disabled backends: backend-assets/util/llama-cpp-rpc-server llama-cpp-grpc llama-ggml gpt4all rwkv tinydream bert-embeddings huggingface stablediffusion
+# disabled backends: backend-assets/util/llama-cpp-rpc-server llama-cpp-grpc
+# llama-ggml gpt4all rwkv tinydream bert-embeddings huggingface stablediffusion
# enabled backends
_GRPC_BACKENDS="backend-assets/grpc/local-store \
$_OPTIONAL_GRPC"
@@ -55,7 +60,7 @@ $_OPTIONAL_GRPC"
_pkgbase="localai"
pkgbase="${_pkgbase}-git"
pkgname=()
-pkgver=2.20.0
+pkgver=2.20.1.37.gae6d3276
pkgrel=1
pkgdesc="Self-hosted OpenAI API alternative - Open Source, community-driven and local-first."
url="https://github.com/mudler/LocalAI"
@@ -67,8 +72,6 @@ backup=("etc/${_pkgbase}/${_pkgbase}.conf")
source=(
"${_pkgbase}"::"git+https://github.com/mudler/LocalAI"
- "libbackend.patch"
- "backend-req.patch"
"README.md"
"${_pkgbase}.conf"
"${_pkgbase}.service"
@@ -83,8 +86,6 @@ sha256sums=(
'SKIP'
'SKIP'
'SKIP'
- 'SKIP'
- 'SKIP'
)
depends=(
@@ -92,6 +93,37 @@ depends=(
'grpc'
)
+# system-wide dependencies for python backends
+_python_depends=(
+ 'python-protobuf'
+ 'python-grpcio'
+ 'python-certifi'
+ 'python-pillow'
+ 'python-opencv'
+ 'python-numpy'
+ 'python-pytorch'
+ 'python-torchaudio'
+ 'python-torchvision'
+ 'python-transformers'
+ 'python-sentencepiece'
+ 'python-peft'
+ 'python-accelerate'
+ )
+
+# python backends and their local dependencies
+_python_backends=(
+ "autogptq auto-gptq"
+ "bark bark"
+ "coqui coqui-tts"
+ "diffusers diffusers compel optimum-quanto"
+ "parler-tts llvmlite"
+ "rerankers rerankers[transformers]"
+ "transformers"
+ "transformers-musicgen"
+ "vall-e-x"
+ "vllm vllm"
+)
+
makedepends=(
'go'
'git'
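Each _python_backends entry is a space-separated "backend name" plus optional
extra pip packages for that backend's venv; the first word doubles as the grpc
backend directory name. A minimal bash sketch of the split the service unit
performs later:

    # entry format: "<backend> [pip packages...]"
    line="diffusers diffusers compel optimum-quanto"
    read -r backend dependencies <<< "$line"
    echo "$backend"       # diffusers  (directory under backend-assets/grpc/python/)
    echo "$dependencies"  # diffusers compel optimum-quanto  (installed into its venv)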
@@ -101,28 +133,18 @@ makedepends=(
'blas-openblas'
'sdl2'
'ffmpeg'
+ 'upx'
'protoc-gen-go'
'protoc-gen-go-grpc'
- 'upx'
+ 'python-protobuf'
+ 'python-grpcio'
+ 'python-grpcio-tools'
)
-if [[ $_ENABLE_PYTHON = 1 ]]; then
+if [[ $_ENABLE_PIPER = 1 ]]; then
depends+=(
'espeak-ng'
- 'python-protobuf'
- 'python-grpcio'
- 'python-grpcio-tools'
- 'python-numpy'
- 'python-opencv'
- 'python-pillow'
- 'python-pytorch'
- 'python-torchaudio'
- 'python-torchvision'
- 'python-accelerate'
)
-fi
-
-if [[ $_ENABLE_PIPER = 1 ]]; then
makedepends+=(
'onnxruntime'
'libucd-git'
@@ -131,10 +153,16 @@ fi
if [[ $_ENABLE_CPU = 1 ]]; then
pkgname+=("${pkgbase}")
+ if [[ $_ENABLE_PYTHON = 1 ]]; then
+ pkgname+=("${pkgbase}-python")
+ fi
fi
if [[ $_ENABLE_CUDA = 1 ]]; then
pkgname+=("${pkgbase}-cuda")
+ if [[ $_ENABLE_PYTHON = 1 ]]; then
+ pkgname+=("${pkgbase}-cuda-python")
+ fi
makedepends+=(
'cuda'
'cudnn'
@@ -145,6 +173,9 @@ fi
if [[ $_ENABLE_ROCM = 1 ]]; then
pkgname+=("${pkgbase}-rocm")
+ if [[ $_ENABLE_PYTHON = 1 ]]; then
+ pkgname+=("${pkgbase}-rocm-python")
+ fi
makedepends+=(
'rocm-hip-sdk'
'miopen-hip'
@@ -172,10 +203,11 @@ Build Options:
_ENABLE_CPU=$_ENABLE_CPU
_ENABLE_CUDA=$_ENABLE_CUDA
_ENABLE_ROCM=$_ENABLE_ROCM
+_ENABLE_PYTHON=$_ENABLE_PYTHON
+
_ENABLE_PARALLEL=$_ENABLE_PARALLEL
_ENABLE_PIPER=$_ENABLE_PIPER
_ENABLE_WHISPER=$_ENABLE_WHISPER
-_ENABLE_PYTHON=$_ENABLE_PYTHON
_OPTIONAL_MAKE_ARGS=$_OPTIONAL_MAKE_ARGS
_EXTERNAL_SOURCES=$_EXTERNAL_SOURCES
@@ -198,11 +230,7 @@ EOF
mkdir -p "sources"
make $_OPTIONAL_MAKE_ARGS $_EXTERNAL_SOURCES
- # modify python backend build library to use --system-site-packages, and dont reinstall torch*
- patch -N -i "${srcdir}/libbackend.patch" -p1
-
- # modify python backend requirements
- patch -N -i "${srcdir}/backend-req.patch" -p1
+ # patch -N -i "${srcdir}/libbackend.patch" -p1
if [[ $_ENABLE_PIPER = 1 ]]; then
# fix piper build
@@ -230,12 +258,11 @@ EOF
}
_build() {
- if [[ $_ENABLE_PYTHON = 1 ]]; then
- # generate grpc protobuf files for python and copy to backend-assets
- make BUILD_TYPE="$1" protogen-python
- mkdir -p backend-assets/grpc
- cp -a backend/python backend-assets/grpc/python
- fi
+ # generate grpc protobuf files for python and copy to backend-assets
+ make BUILD_TYPE="$1" protogen-python
+ mkdir -p backend-assets/grpc
+ cp -a backend/python backend-assets/grpc/python
+
if test "$1" = "cublas"; then
_LLAMA_CPP_BACKEND="backend-assets/grpc/llama-cpp-cuda"
elif test "$1" = "hipblas"; then
@@ -243,23 +270,24 @@ _build() {
else
_LLAMA_CPP_BACKEND="backend-assets/grpc/llama-cpp-avx2"
fi
+
cat - << EOF
_build($1):
-
GO_TAGS=$_GO_TAGS
OPTIONAL_MAKE_ARGS=$_OPTIONAL_MAKE_ARGS
LLAMA_BACKEND=$_LLAMA_CPP_BACKEND
OTHER_GRPC_BACKENDS=$_GRPC_BACKENDS
-
EOF
+
_nproc=1
if [[ $_ENABLE_PARALLEL = 1 ]]; then
# use number of physical cores for parallel build
_nproc=$(grep "^core id" /proc/cpuinfo | sort -n | uniq | wc -l)
fi
+
make -j"$_nproc" \
BUILD_TYPE="$1" \
GRPC_BACKENDS="$_LLAMA_CPP_BACKEND $_GRPC_BACKENDS" \
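The core-id pipeline above counts physical cores rather than SMT threads.
Standalone, with one caveat:

    # one "core id" line per logical CPU; dedupe collapses SMT siblings
    grep "^core id" /proc/cpuinfo | sort -n | uniq | wc -l
    # note: core ids repeat per physical socket, so this undercounts on
    # multi-socket machines; `nproc` would count logical CPUs instead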
@@ -297,10 +325,16 @@ build() {
_package_install() {
install -Dm755 "local-ai" "${pkgdir}/usr/bin/localai"
ln -s "/usr/bin/localai" "${pkgdir}/usr/bin/local-ai"
+ install -Dm644 LICENSE -t "${pkgdir}/usr/share/licenses/${_pkgbase}"
install -Dm644 README.md -t "${pkgdir}/usr/share/doc/${_pkgbase}"
install -Dm644 "${srcdir}/README.md" "${pkgdir}/usr/share/doc/${_pkgbase}/README-build.md"
- install -Dm644 LICENSE -t "${pkgdir}/usr/share/licenses/${_pkgbase}"
install -Dm644 ${srcdir}/${_pkgbase}.conf -t "${pkgdir}/etc/${_pkgbase}"
+ _python_backends_str=""
+ if [[ $_IS_PYTHON_FLAVOR = 1 ]]; then
+ _python_backends_str=$(printf "%s\n" "${_python_backends[@]}")
+ fi
+ echo "ARCH_LOCALAI_PYTHON_BACKENDS=\"${_python_backends_str}\"" \
+ > "${pkgdir}/etc/${_pkgbase}/python_backends.conf"
install -Dm644 ${srcdir}/${_pkgbase}.service -t "${pkgdir}/usr/lib/systemd/system"
install -Dm644 ${srcdir}/${_pkgbase}.sysusers "${pkgdir}/usr/lib/sysusers.d/${_pkgbase}.conf"
install -Dm644 ${srcdir}/${_pkgbase}.tmpfiles "${pkgdir}/usr/lib/tmpfiles.d/${_pkgbase}.conf"
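On the "-python" flavors, _package_install serializes _python_backends into
/etc/localai/python_backends.conf, which the service unit then sources. Given
the array above, the generated file should look like:

    ARCH_LOCALAI_PYTHON_BACKENDS="autogptq auto-gptq
    bark bark
    coqui coqui-tts
    diffusers diffusers compel optimum-quanto
    parler-tts llvmlite
    rerankers rerankers[transformers]
    transformers
    transformers-musicgen
    vall-e-x
    vllm vllm"

On the non-python flavors the variable is written empty, so the service skips
all python backends.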
@@ -310,6 +344,7 @@ package_localai-git() {
cd "${srcdir}/${_pkgbase}-cpu"
depends+=('openblas')
if [[ $_ENABLE_PIPER = 1 ]]; then depends+=('onnxruntime'); fi
+ if [[ $_IS_PYTHON_FLAVOR = 1 ]]; then depends+=("${_python_depends[@]}"); fi
_package_install
}
@@ -318,7 +353,7 @@ package_localai-git-cuda() {
pkgdesc+=' (with CUDA support)'
depends+=('cuda')
if [[ $_ENABLE_PIPER = 1 ]]; then depends+=('onnxruntime'); fi
- if [[ $_ENABLE_PYTHON = 1 ]]; then depends+=('python-pytorch-cuda'); fi
+ if [[ $_IS_PYTHON_FLAVOR = 1 ]]; then depends+=("${_python_depends[@]}"); depends+=('python-pytorch-cuda'); fi
_package_install
}
@@ -327,6 +362,21 @@ package_localai-git-rocm() {
pkgdesc+=' (with ROCM support)'
depends+=('rocm-hip-runtime' 'hipblas' 'rocblas')
if [[ $_ENABLE_PIPER = 1 ]]; then depends+=('onnxruntime'); fi
- if [[ $_ENABLE_PYTHON = 1 ]]; then depends+=('python-pytorch-rocm'); fi
+ if [[ $_IS_PYTHON_FLAVOR = 1 ]]; then depends+=("${_python_depends[@]}"); depends+=('python-pytorch-rocm'); fi
_package_install
}
+
+package_localai-git-python() {
+ _IS_PYTHON_FLAVOR=1
+ package_localai-git "$@"
+}
+
+package_localai-git-cuda-python() {
+ _IS_PYTHON_FLAVOR=1
+ package_localai-git-cuda "$@"
+}
+
+package_localai-git-rocm-python() {
+ _IS_PYTHON_FLAVOR=1
+ package_localai-git-rocm "$@"
+}
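The new wrapper functions only flip _IS_PYTHON_FLAVOR and delegate, so each
"-python" package is the matching base package plus the python dependency set.
Which flavors get built is steered by the _ENABLE_* variables, which default
to 1 and can be overridden from the environment (the ${_ENABLE_CPU:-1} pattern
at the top of the PKGBUILD); for example:

    # build only the CUDA pair: localai-git-cuda and localai-git-cuda-python
    _ENABLE_CPU=0 _ENABLE_ROCM=0 makepkg -s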
diff --git a/backend-req.patch b/backend-req.patch
deleted file mode 100644
index 6be8cbe0491c..000000000000
--- a/backend-req.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-diff --git a/backend/python/coqui/requirements.txt b/backend/python/coqui/requirements.txt
-index 6125f739..7738f8ff 100644
---- a/backend/python/coqui/requirements.txt
-+++ b/backend/python/coqui/requirements.txt
-@@ -1,4 +1,4 @@
--TTS==0.22.0
-+coqui-tts
- grpcio==1.65.5
- protobuf
- certifi
-\ No newline at end of file
-diff --git a/backend/python/transformers-musicgen/requirements.txt b/backend/python/transformers-musicgen/requirements.txt
-index a0076112..e31e1250 100644
---- a/backend/python/transformers-musicgen/requirements.txt
-+++ b/backend/python/transformers-musicgen/requirements.txt
-@@ -1,4 +1,4 @@
- grpcio==1.65.5
- protobuf
--scipy==1.14.0
-+scipy
- certifi
-\ No newline at end of file
-diff --git a/backend/python/transformers/requirements.txt b/backend/python/transformers/requirements.txt
-index 5531ea0e..9fdb848d 100644
---- a/backend/python/transformers/requirements.txt
-+++ b/backend/python/transformers/requirements.txt
-@@ -1,4 +1,3 @@
- grpcio==1.65.5
- protobuf
- certifi
--setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406
-\ No newline at end of file
diff --git a/libbackend.patch b/libbackend.patch
deleted file mode 100644
index 4ea970e84d4c..000000000000
--- a/libbackend.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-diff --git a/backend/python/common/libbackend.sh b/backend/python/common/libbackend.sh
-index 934b1fd3..870ba409 100644
---- a/backend/python/common/libbackend.sh
-+++ b/backend/python/common/libbackend.sh
-@@ -88,7 +88,7 @@ function getBuildProfile() {
- # always result in an activated virtual environment
- function ensureVenv() {
- if [ ! -d "${EDIR}/venv" ]; then
-- uv venv ${EDIR}/venv
-+ uv venv --system-site-packages ${EDIR}/venv
- echo "virtualenv created"
- fi
-
-@@ -125,35 +125,12 @@ function ensureVenv() {
- function installRequirements() {
- ensureVenv
-
-- # These are the requirements files we will attempt to install, in order
-- declare -a requirementFiles=(
-- "${EDIR}/requirements-install.txt"
-- "${EDIR}/requirements.txt"
-- "${EDIR}/requirements-${BUILD_TYPE}.txt"
-- )
--
-- if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
-- requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}.txt")
-- fi
--
-- # if BUILD_TYPE is empty, we are a CPU build, so we should try to install the CPU requirements
-- if [ "x${BUILD_TYPE}" == "x" ]; then
-- requirementFiles+=("${EDIR}/requirements-cpu.txt")
-- fi
--
-- requirementFiles+=("${EDIR}/requirements-after.txt")
--
-- if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
-- requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}-after.txt")
-- fi
--
-- for reqFile in ${requirementFiles[@]}; do
-+ reqFile="${EDIR}/requirements.txt"
- if [ -f ${reqFile} ]; then
- echo "starting requirements install for ${reqFile}"
- uv pip install ${EXTRA_PIP_INSTALL_FLAGS} --requirement ${reqFile}
- echo "finished requirements install for ${reqFile}"
- fi
-- done
- }
-
- # startBackend discovers and runs the backend GRPC server
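Both patches could be dropped because their effect moved: the requirement pins
edited by backend-req.patch are superseded by the per-backend package lists in
_python_backends, and libbackend.patch's --system-site-packages venv handling
now happens at service startup. Per backend, that amounts to roughly the
following (a sketch; %S/%N expands to /var/lib/localai for a system unit, and
targeting uv via VIRTUAL_ENV is an assumption, not something this diff shows):

    bedir=/var/lib/localai/backend-assets/grpc/python/bark
    uv venv --system-site-packages "$bedir/venv"
    VIRTUAL_ENV="$bedir/venv" uv pip install bark   # extra deps from python_backends.conf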
diff --git a/localai.service b/localai.service
index b60a4e74551c..c0386e5fc106 100644
--- a/localai.service
+++ b/localai.service
@@ -15,33 +15,52 @@ WorkingDirectory=%S/%N
Restart=on-failure
# PrivateTmp=yes
-# default environment and local env
+# python_backends, default environment and local env
+EnvironmentFile=-%E/%N/python_backends.conf
EnvironmentFile=%E/%N/%N.conf
EnvironmentFile=-%S/%N/.env
# start server
-ExecStart=/usr/bin/localai run \
- --audio-path="%T/%N/audio" \
- --backend-assets-path="%S/%N" \
- --config-path="%S/%N" \
- --external-grpc-backends=bark:%S/%N/backend-assets/grpc/python/bark/run.sh,coqui:%S/%N/backend-assets/grpc/python/coqui/run.sh,diffusers:%S/%N/backend-assets/grpc/python/diffusers/run.sh,parler-tts:%S/%N/backend-assets/grpc/python/parler-tts/run.sh,rerankers:%S/%N/backend-assets/grpc/python/rerankers/run.sh,sentencetransformers:%S/%N/backend-assets/grpc/python/sentencetransformers/run.sh,transformers:%S/%N/backend-assets/grpc/python/transformers/run.sh,transformers-musicgen:%S/%N/backend-assets/grpc/python/transformers-musicgen/run.sh \
- --image-path="%T/%N/images" \
- --localai-config-dir="%S/%N/config" \
- --models-path="%S/%N/models" \
- --upload-path="%T/%N/upload"
+ExecStart=bash -c '\
+ backend_flag=""; \
+ if test -n "$ARCH_LOCALAI_PYTHON_BACKENDS"; then \
+ mapfile -t backend_lines <<< "$ARCH_LOCALAI_PYTHON_BACKENDS"; \
+ for line in "${backend_lines[@]}"; do \
+ read -r backend _ <<< "$line"; \
+ entry="$backend:%S/%N/backend-assets/grpc/python/$backend/run.sh"; \
+ if test -z "$backend_flag"; then \
+ backend_flag="--external-grpc-backends=$entry"; \
+ else \
+ backend_flag="$backend_flag,$entry"; \
+ fi; \
+ done; \
+ fi; \
+ /usr/bin/localai run \
+ --audio-path="%T/%N/audio" \
+ --backend-assets-path="%S/%N" \
+ --config-path="%S/%N" \
+ $backend_flag \
+ --image-path="%T/%N/images" \
+ --localai-config-dir="%S/%N/config" \
+ --models-path="%S/%N/models" \
+ --upload-path="%T/%N/upload" \
+'
# create virtualenvs for python backends, recreate if localai is newer than venv
-# disabled python backends: autogptq exllama exllama2 mamba openvoice petals vall-e-x vllm
ExecStartPost=bash -c 'sleep 3;\
-for i in bark coqui diffusers parler-tts rerankers \
- sentencetransformers transformers transformers-musicgen; do \
- bedir="%S/%N/backend-assets/grpc/python/$i"; \
- if test /usr/bin/localai -nt $bedir/venv; then rm -r $bedir/venv; fi; \
- if test ! -d $bedir/venv; then \
- echo "re/creating $bedir"; \
- %S/%N/backend-assets/grpc/python/$i/install.sh; \
- fi; \
-done'
+if test -n "$ARCH_LOCALAI_PYTHON_BACKENDS"; then \
+ mapfile -t backend_lines <<< "$ARCH_LOCALAI_PYTHON_BACKENDS"; \
+ for line in "${backend_lines[@]}"; do \
+ read -r backend dependencies <<< "$line"; \
+ bedir="%S/%N/backend-assets/grpc/python/$backend"; \
+ if test /usr/bin/localai -nt $bedir/venv; then rm -r $bedir/venv; fi; \
+ if test ! -d $bedir/venv; then \
+ echo "re/creating $bedir"; \
+ uv venv --system-site-packages $bedir/venv; \
+ if test -n "$dependencies"; then VIRTUAL_ENV="$bedir/venv" uv pip install $dependencies; fi; \
+ fi; \
+ done; \
+fi'
# make some time for virtualenvs to be installed
TimeoutStartSec=180
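With python_backends.conf populated as sketched earlier, the ExecStart loop
collapses the entries into a single flag, so the effective invocation is
roughly (two backends shown; %S/%N = /var/lib/localai, other flags as above):

    /usr/bin/localai run \
      --external-grpc-backends=bark:/var/lib/localai/backend-assets/grpc/python/bark/run.sh,coqui:/var/lib/localai/backend-assets/grpc/python/coqui/run.sh \
      --models-path=/var/lib/localai/models ...

    # the unit is then used like any other service; first start builds the venvs
    systemctl enable --now localai.service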