pkgbase = python-vllm-cuda
pkgdesc = high-throughput and memory-efficient inference and serving engine for LLMs
pkgver = 0.6.5
pkgrel = 2
url = https://github.com/vllm-project/vllm
arch = x86_64
license = Apache-2.0
makedepends = git
makedepends = gcc13
makedepends = cuda
makedepends = cuda-tools
depends = python-installer
depends = python
depends = python-pytorch
provides = python-vllm
conflicts = python-vllm
source = git+https://github.com/vllm-project/vllm.git#tag=v0.6.5
sha256sums = 5c8228773ae38ddda2d38f5fc68a259856cab4891766792d859cdaf225e63bc5
pkgname = python-vllm-cuda