pkgbase = llama.cpp-sycl-f32-git
pkgdesc = Port of Facebook's LLaMA model in C/C++ (with Intel SYCL GPU optimizations and F32)
pkgver = b5123
pkgrel = 1
url = https://github.com/ggerganov/llama.cpp
arch = armv7h
arch = aarch64
arch = x86_64
license = MIT
makedepends = cmake
makedepends = git
makedepends = intel-oneapi-basekit
depends = intel-oneapi-basekit
	optdepends = python-gguf: convert_hf_to_gguf.py python script
optdepends = python-numpy: convert_hf_to_gguf.py python script
optdepends = python-pytorch: convert_hf_to_gguf.py python script
provides = llama.cpp-sycl-f32
conflicts = llama.cpp-sycl-f32
source = llama.cpp-sycl-f32::git+https://github.com/ggerganov/llama.cpp
source = kompute::git+https://github.com/nomic-ai/kompute.git
source = llama.cpp.conf
source = llama.cpp.service
sha256sums = SKIP
sha256sums = SKIP
sha256sums = 53fa70cfe40cb8a3ca432590e4f76561df0f129a31b121c9b4b34af0da7c4d87
sha256sums = 1fc9d4f0cfa407404acc3859c26c53a79d14f5e5bc72f21084d87dde04e36f20
pkgname = llama.cpp-sycl-f32-git
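
# For reference, a minimal sketch of the SYCL configure step this metadata
# implies. The actual build commands live in the accompanying PKGBUILD; the
# flags below are assumptions based on upstream llama.cpp SYCL docs, not
# copied from that PKGBUILD:
#
#   source /opt/intel/oneapi/setvars.sh     # load the oneAPI toolchain from intel-oneapi-basekit
#   cmake -B build \
#     -DCMAKE_C_COMPILER=icx \
#     -DCMAKE_CXX_COMPILER=icpx \
#     -DGGML_SYCL=ON                        # F32 is the SYCL default; an f16
#                                           # variant would add -DGGML_SYCL_F16=ON
#   cmake --build build --config Release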