pkgbase = ollama-cuda
pkgdesc = Create, run and share large language models (LLMs) with CUDA
pkgver = 0.1.16
pkgrel = 1
url = https://github.com/jmorganca/ollama
arch = x86_64
license = MIT
makedepends = cmake
makedepends = cuda
makedepends = git
makedepends = go
makedepends = setconf
provides = ollama
conflicts = ollama
source = git+https://github.com/jmorganca/ollama#commit=6ee8c80199866f1d1826ca8f8239e7e70c96fab7
source = ggml::git+https://github.com/ggerganov/llama.cpp#commit=9e232f0234073358e7031c1b8d7aa45020469a3b
source = gguf::git+https://github.com/ggerganov/llama.cpp#commit=948ff137ec37f1ec74c02905917fa0afc9b97514
source = sysusers.conf
source = tmpfiles.d
source = ollama.service
b2sums = SKIP
b2sums = SKIP
b2sums = SKIP
b2sums = 3aabf135c4f18e1ad745ae8800db782b25b15305dfeaaa031b4501408ab7e7d01f66e8ebb5be59fc813cfbff6788d08d2e48dcf24ecc480a40ec9db8dbce9fec
b2sums = c890a741958d31375ebbd60eeeb29eff965a6e1e69f15eb17ea7d15b575a4abee176b7d407b3e1764aa7436862a764a05ad04bb9901a739ffd81968c09046bb6
b2sums = a773bbf16cf5ccc2ee505ad77c3f9275346ddf412be283cfeaee7c2e4c41b8637a31aaff8766ed769524ebddc0c03cf924724452639b62208e578d98b9176124
pkgname = ollama-cuda