# path: root/.SRCINFO
# blob: a2b3ff8303b86986c223bc41e1745ddbcbd0d19d
pkgbase = tensorrt
	pkgdesc = A platform for high-performance deep learning inference on NVIDIA hardware
	pkgver = 8.2.3.0
	pkgrel = 1
	url = https://developer.nvidia.com/tensorrt/
	arch = x86_64
	license = custom:NVIDIA-SLA
	license = Apache
	makedepends = git
	makedepends = cmake
	makedepends = poppler
	makedepends = cuda
	makedepends = cudnn
	makedepends = pybind11
	makedepends = python
	makedepends = python-onnx
	makedepends = python-pip
	makedepends = python-wheel
	noextract = protobuf-cpp-3.17.3.tar.gz
	source = local://TensorRT-8.2.3.0.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz
	source = git+https://github.com/NVIDIA/TensorRT.git#tag=22.02
	source = protobuf-protocolbuffers::git+https://github.com/protocolbuffers/protobuf.git
	source = cub-nvlabs::git+https://github.com/NVlabs/cub.git
	source = git+https://github.com/onnx/onnx-tensorrt.git
	source = git+https://github.com/onnx/onnx.git
	source = git+https://github.com/pybind/pybind11.git
	source = git+https://github.com/google/benchmark.git
	source = https://github.com/google/protobuf/releases/download/v3.17.3/protobuf-cpp-3.17.3.tar.gz
	source = 010-tensorrt-use-local-protobuf-sources.patch
	source = 020-tensorrt-fix-python.patch
	sha256sums = 207c0c4820e5acf471925b7da4c59d48c58c265a27d88287c4263038c389e106
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = 51cec99f108b83422b7af1170afd7aeb2dd77d2bcbb7b6bad1f92509e9ccf8cb
	sha256sums = ea25bb1b188d53cbfbec35d242ab2a2fa8d6009c547c9f5f67bc2f1ad127ceac
	sha256sums = 10a94e8117e2c0b43de7fdf6f5c1a7fbdd954f8603fe054edd88689d3714e1c2

pkgname = tensorrt
	depends = cuda
	depends = cudnn

pkgname = python-tensorrt
	pkgdesc = A platform for high-performance deep learning inference on NVIDIA hardware (python bindings and tools)
	depends = python
	depends = python-numpy
	depends = python-pycuda
	depends = tensorrt
	optdepends = python-onnx: for onnx_graphsurgeon python module
	optdepends = python-onnxruntime: for onnx_graphsurgeon and polygraphy python modules
	optdepends = python-protobuf: for polygraphy and uff python modules
	optdepends = python-tensorflow-cuda: for graphsurgeon, polygraphy and uff python modules and convert-to-uff tool

pkgname = tensorrt-doc
	pkgdesc = A platform for high-performance deep learning inference on NVIDIA hardware (documentation)
	arch = any
	license = custom:NVIDIA-SLA