pkgbase = tensorrt
	pkgdesc = A platform for high-performance deep learning inference using NVIDIA hardware
	pkgver = 7.2.1.6
	pkgrel = 1
	url = https://github.com/NVIDIA/TensorRT/
	arch = x86_64
	license = custom
	license = Apache
	makedepends = git
	makedepends = cmake
	makedepends = poppler
	makedepends = pybind11
	makedepends = python
	makedepends = python-pip
	makedepends = zlib
	makedepends = cuda
	makedepends = cudnn
	noextract = protobuf-cpp-3.12.4.tar.gz
	source = local://TensorRT-7.2.1.6.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.0.tar.gz
	source = git+https://github.com/NVIDIA/TensorRT.git#tag=7.2.1
	source = protobuf-protocolbuffers::git+https://github.com/protocolbuffers/protobuf.git#branch=3.8.x
	source = git+https://github.com/NVIDIA/cub.git#tag=1.8.0
	source = git+https://github.com/onnx/onnx-tensorrt.git#branch=7.2.1
	source = git+https://github.com/onnx/onnx.git#branch=rel-1.6.0
	source = git+https://github.com/pybind/pybind11.git
	source = git+https://github.com/google/benchmark.git
	source = https://github.com/google/protobuf/releases/download/v3.12.4/protobuf-cpp-3.12.4.tar.gz
	source = 010-tensorrt-use-local-protobuf-sources.patch
	source = 020-tensorrt-fix-cub-deprecation-huge-warnings.patch
	sha256sums = eef37d387b74f452c86d1ffe58bc57d0514409591e1ac2ae9d11b3167d8d5e6b
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = SKIP
	sha256sums = ccfbaaba52f67e0e6536a05f3df3f6618620d255513cfca3a07f5935b624e26b
	sha256sums = ea25bb1b188d53cbfbec35d242ab2a2fa8d6009c547c9f5f67bc2f1ad127ceac
	sha256sums = e6153bf43c248fb3ed843e41f6b722ff8c3507ad48fe105bfa129b8641741ecf

pkgname = tensorrt
	depends = cuda
	depends = cudnn
	optdepends = python-numpy: for graphsurgeon, onnx_graphsurgeon and uff python modules
	optdepends = python-onnx: for onnx_graphsurgeon python module
	optdepends = python-protobuf: for uff python module and convert-to-uff tool
	optdepends = python-tensorflow-cuda: for graphsurgeon and uff python modules and convert-to-uff tool

pkgname = tensorrt-doc
	pkgdesc = A platform for high-performance deep learning inference using NVIDIA hardware (documentation)
	arch = any
	license = custom