pkgbase = llama-cpp
	pkgdesc = Port of Facebook's LLaMA model in C/C++
	pkgver = c3e53b4
	pkgrel = 1
	url = https://github.com/ggerganov/llama.cpp
	arch = x86
	arch = x86_64
	arch = arm
	arch = aarch64
	license = GPL3
	makedepends = cmake
	makedepends = intel-oneapi-dpcpp-cpp
	makedepends = cuda
	makedepends = intel-oneapi-mkl
	makedepends = clblast
	depends = openmpi
	depends = python-numpy
	depends = python-sentencepiece
	source = https://github.com/ggerganov/llama.cpp/archive/master-c3e53b4.tar.gz
	sha256sums = 7bf8a74bd3393b2c96abca17099487dccdd114c6bb5bb59b70daf02efe437606

pkgname = llama-cpp
	depends = openmpi
	depends = python-numpy
	depends = python-sentencepiece
	depends = openblas
	depends = intel-oneapi-mkl

pkgname = llama-cpp-cuda
	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with CUDA)
	depends = openmpi
	depends = python-numpy
	depends = python-sentencepiece
	depends = cuda
	provides = llama-cpp
	conflicts = llama-cpp

pkgname = llama-cpp-opencl
	pkgdesc = Port of Facebook's LLaMA model in C/C++ (with OpenCL)
	depends = openmpi
	depends = python-numpy
	depends = python-sentencepiece
	depends = clblast
	provides = llama-cpp
	conflicts = llama-cpp
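
# Note: a .SRCINFO like the above is not edited by hand; it is generated from
# the package's PKGBUILD. A minimal sketch of the usual maintainer workflow,
# assuming the PKGBUILD sits in the current directory:
#
#     makepkg --printsrcinfo > .SRCINFO
#
# The pkgbase block holds attributes shared by every split package, and each
# pkgname block (llama-cpp, llama-cpp-cuda, llama-cpp-opencl here) overrides
# or extends them for that variant.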