pkgbase = llama.cpp-vulkan
	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with Vulkan GPU optimizations)
	pkgver = b6451
	pkgrel = 1
	url = https://github.com/ggml-org/llama.cpp
	arch = x86_64
	arch = armv7h
	arch = aarch64
	license = MIT
	makedepends = cmake
	makedepends = git
	makedepends = shaderc
	makedepends = vulkan-headers
	depends = curl
	depends = gcc-libs
	depends = glibc
	depends = python
	depends = vulkan-icd-loader
	optdepends = python-numpy: needed for convert_hf_to_gguf.py
	optdepends = python-safetensors: needed for convert_hf_to_gguf.py
	optdepends = python-sentencepiece: needed for convert_hf_to_gguf.py
	optdepends = python-pytorch: needed for convert_hf_to_gguf.py
	optdepends = python-transformers: needed for convert_hf_to_gguf.py
	provides = llama.cpp
	conflicts = llama.cpp
	conflicts = libggml
	conflicts = ggml
	conflicts = stable-diffusion.cpp
	options = lto
	options = !debug
	source = llama.cpp-vulkan-b6451.tar.gz::https://github.com/ggml-org/llama.cpp/archive/refs/tags/b6451.tar.gz
	sha256sums = 9b1bb282c520b736c40a1f87637211356586ca68fa61dccdd8d68be1966f751e

pkgname = llama.cpp-vulkan