Package Details: python-tensorrt 10.5.0.18-1

Git Clone URL: https://aur.archlinux.org/tensorrt.git (read-only)
Package Base: tensorrt
Description: A platform for high-performance deep learning inference on NVIDIA hardware (python bindings and tools)
Upstream URL: https://developer.nvidia.com/tensorrt/
Keywords: ai artificial intelligence nvidia
Licenses: Apache-2.0, LicenseRef-custom
Provides: python-onnx-graphsurgeon, python-polygraphy, python-tensorflow-quantization
Submitter: dbermond
Maintainer: dbermond
Last Packager: dbermond
Votes: 19
Popularity: 1.00
First Submitted: 2018-07-29 16:17 (UTC)
Last Updated: 2024-10-15 18:09 (UTC)

Dependencies (18)

Sources (13)

Latest Comments


pwf commented on 2023-05-29 19:26 (UTC) (edited on 2023-05-29 19:54 (UTC) by pwf)

I was able to install the GA release with the following patch (I think... I haven't tried it from scratch, and I ended up editing a few files in src, but I believe these are all the changes):

diff --git a/020-tensorrt-fix-python.patch b/020-tensorrt-fix-python.patch
index b0d11f9..f416e22 100644
--- a/020-tensorrt-fix-python.patch
+++ b/020-tensorrt-fix-python.patch
@@ -1,14 +1,5 @@
 --- a/python/CMakeLists.txt
 +++ b/python/CMakeLists.txt
-@@ -40,7 +40,7 @@ set(CMAKE_CXX_STANDARD ${CPP_STANDARD})
- 
- if (NOT MSVC)
-     # This allows us to use TRT libs shipped with standalone wheels.
--    set(CMAKE_SHARED_LINKER_FLAGS -Wl,-rpath=$ORIGIN)
-+    #set(CMAKE_SHARED_LINKER_FLAGS -Wl,-rpath=$ORIGIN)
- endif()
- 
- # -------- PATHS --------
 @@ -91,7 +91,7 @@ if (MSVC)
      find_path(PY_LIB_DIR ${PYTHON_LIB_NAME}.lib HINTS ${WIN_EXTERNALS}/${PYTHON} ${EXT_PATH}/${PYTHON} PATH_SUFFIXES lib)
      message(STATUS "PY_LIB_DIR: ${PY_LIB_DIR}")
@@ -64,22 +55,13 @@
          ${1} > ${2}
  }

-@@ -59,6 +62,6 @@ pushd ${ROOT_PATH}/python/packaging
+@@ -59,6 +62,9 @@ pushd ${ROOT_PATH}/python/packaging
  for dir in $(find . -type d); do mkdir -p ${WHEEL_OUTPUT_DIR}/$dir; done
  for file in $(find . -type f); do expand_vars_cp $file ${WHEEL_OUTPUT_DIR}/${file}; done
  popd
--python3 setup.py -q bdist_wheel --python-tag=cp${PYTHON_MAJOR_VERSION}${PYTHON_MINOR_VERSION} --plat-name=linux_${TARGET}
-+python -m build --wheel --no-isolation
++cp tensorrt/tensorrt.so bindings_wheel/tensorrt/tensorrt.so
++
++pushd ${WHEEL_OUTPUT_DIR}/bindings_wheel
+ python3 setup.py -q bdist_wheel --python-tag=cp${PYTHON_MAJOR_VERSION}${PYTHON_MINOR_VERSION} --plat-name=linux_${TARGET}

  popd
---- a/python/src/parsers/pyOnnx.cpp
-+++ b/python/src/parsers/pyOnnx.cpp
-@@ -17,7 +17,7 @@
- 
- // Implementation of PyBind11 Binding Code for OnnxParser
- #include "ForwardDeclarations.h"
--#include "onnxOpenSource/NvOnnxParser.h"
-+#include "onnx/NvOnnxParser.h"
- #include "parsers/pyOnnxDoc.h"
- #include "utils.h"
- #include <pybind11/stl.h>
diff --git a/PKGBUILD b/PKGBUILD
index 22b1105..dd61ad4 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -9,9 +9,9 @@

 pkgbase=tensorrt
 pkgname=('tensorrt' 'python-tensorrt')
-pkgver=8.6.0.12
+pkgver=8.6.1.6
 _cudaver=12.0
-_cudnnver=8.8
+_cudnnver=8.9
 _protobuf_ver=3.20.1
 _pybind11_ver=2.9.2
 _graphsurgeon_ver=0.4.6
@@ -39,7 +39,7 @@ source=("local://TensorRT-${pkgver}.Linux.${CARCH}-gnu.cuda-${_cudaver}.tar.gz"
         '020-tensorrt-fix-python.patch'
         'TensorRT-SLA.txt')
 noextract=("protobuf-cpp-${_protobuf_ver}.tar.gz")
-sha256sums=('033efe9dc4f3d2b179af0d5afbefd504b15dbb1547920a90115d45e559ae6e77'
+sha256sums=('0f8157a5fc5329943b338b893591373350afa90ca81239cdadd7580cd1eba254'
             'SKIP'
             'SKIP'
             'SKIP'
@@ -49,7 +49,7 @@ sha256sums=('033efe9dc4f3d2b179af0d5afbefd504b15dbb1547920a90115d45e559ae6e77'
             'SKIP'
             'dddd73664306d7d895a95e1cf18925b31b52785e468727e4635b45edae5166f9'
             'ba94c0685216fe9566f7989df98b372e72a8da04b66d64380024107f2f7f4a8f'
-            '36233e5484ba7adb364699ba0e71ada119666edec55a5b96263e0c3265f8ebd3'
+            'dee4c446121e68afff8e034bc5d67c056db2f443daec21f9b1339d0de8ee5676'
             'ff3140050390f7b61703c71de0885f11583456abf2402bb6d3990add13fd0e33')

 prepare() {
@@ -161,7 +161,7 @@ package_python-tensorrt() {

     local _dir
     for _dir in "TensorRT-${pkgver}"/{graphsurgeon,uff} \
-                 TensorRT/{python/build/dist,tools/{onnx-graphsurgeon,Polygraphy,tensorflow-quantization}/dist}
+                 TensorRT/{python/build/bindings_wheel/dist,tools/{onnx-graphsurgeon,Polygraphy,tensorflow-quantization}/dist}
     do
         cd "${srcdir}/${_dir}"
         python -m installer --destdir="$pkgdir" *.whl

And actually, the cudnn version should now be 8.9, yeah?
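
For anyone else trying this, a minimal sketch of applying the diff above to a fresh checkout (assuming pacman-contrib provides updpkgsums; commands are illustrative, not a tested recipe):

git clone https://aur.archlinux.org/tensorrt.git
cd tensorrt
git apply ga.patch    # the diff above, saved as ga.patch
# drop the GA tarball (TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-12.0.tar.gz)
# next to the PKGBUILD; the source array fetches it via local://
updpkgsums            # optional: recompute sums instead of hand-editing them
makepkg -si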

Moebius14 commented on 2023-05-26 21:37 (UTC)

@mergen I got the same error when I accidentally downloaded TensorRT version 8.6.1.6 instead of 8.6.0.12. Make sure you download version 8.6.0.12. You can find it under "TensorRT 8.6 EA". Here's the link for your convenience:

https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/secure/8.6.0/tars/TensorRT-8.6.0.12.Linux.x86_64-gnu.cuda-12.0.tar.gz
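
A quick way to confirm you grabbed the EA tarball rather than the GA one is to check its sha256 against the sum in the PKGBUILD (the expected value below is the 8.6.0.12 sum from the current PKGBUILD):

sha256sum TensorRT-8.6.0.12.Linux.x86_64-gnu.cuda-12.0.tar.gz
# expected: 033efe9dc4f3d2b179af0d5afbefd504b15dbb1547920a90115d45e559ae6e77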

mergen commented on 2023-05-15 08:35 (UTC) (edited on 2023-05-15 09:18 (UTC) by mergen)

I modified the PKGBUILD with the new version and sha256sums, but I still get an error:

Submodule 'parsers/onnx' (https://github.com/onnx/onnx-tensorrt.git) registered for path 'parsers/onnx'
Submodule 'third_party/cub' (https://github.com/NVlabs/cub.git) registered for path 'third_party/cub'
Submodule 'third_party/protobuf' (https://github.com/protocolbuffers/protobuf.git) registered for path 'third_party/protobuf'
Cloning into '/home/mrbs/pg/tensorrt/src/TensorRT/parsers/onnx'...
done.
Cloning into '/home/mrbs/pg/tensorrt/src/TensorRT/third_party/cub'...
done.
Cloning into '/home/mrbs/pg/tensorrt/src/TensorRT/third_party/protobuf'...
done.
Submodule path 'parsers/onnx': checked out '6ba67d3428e05f690145373ca87fb8d32f98df45'
Submodule path 'third_party/cub': checked out 'c3cceac115c072fb63df1836ff46d8c60d9eb304'
Submodule path 'third_party/protobuf': checked out 'aea4a275e28329f648e046469c095eef74254bb2'
Submodule 'third_party/onnx' (https://github.com/onnx/onnx.git) registered for path 'third_party/onnx'
Cloning into '/home/mrbs/pg/tensorrt/src/TensorRT/parsers/onnx/third_party/onnx'...
done.
Submodule path 'third_party/onnx': checked out 'ad834eb73ee0cd9b6fa9ea892caeed5fa17d7dc0'
Submodule 'third_party/benchmark' (https://github.com/google/benchmark.git) registered for path 'third_party/benchmark'
Submodule 'third_party/pybind11' (https://github.com/pybind/pybind11.git) registered for path 'third_party/pybind11'
Cloning into '/home/mrbs/pg/tensorrt/src/TensorRT/parsers/onnx/third_party/onnx/third_party/benchmark'...
done.
Cloning into '/home/mrbs/pg/tensorrt/src/TensorRT/parsers/onnx/third_party/onnx/third_party/pybind11'...
done.
Submodule path 'third_party/benchmark': checked out '0d98dba29d66e93259db7daa53a9327df767a415'
Submodule path 'third_party/pybind11': checked out '914c06fb252b6cc3727d0eedab6736e88a3fcb01'
patching file third_party/protobuf.cmake
patching file python/CMakeLists.txt
Hunk #1 FAILED at 40.
1 out of 3 hunks FAILED -- saving rejects to file python/CMakeLists.txt.rej
patching file python/build.sh
patching file python/src/parsers/pyOnnx.cpp
Reversed (or previously applied) patch detected!  Skipping patch.
1 out of 1 hunk ignored -- saving rejects to file python/src/parsers/pyOnnx.cpp.rej

The content of the file ./src/TensorRT/python/CMakeLists.txt.rej is:

--- python/CMakeLists.txt
+++ python/CMakeLists.txt
@@ -40,7 +40,7 @@ set(CMAKE_CXX_STANDARD ${CPP_STANDARD})

if (NOT MSVC)
# This allows us to use TRT libs shipped with standalone wheels.
-    set(CMAKE_SHARED_LINKER_FLAGS -Wl,-rpath=$ORIGIN)
+    #set(CMAKE_SHARED_LINKER_FLAGS -Wl,-rpath=$ORIGIN)
endif()

# -------- PATHS --------

The content of the file ./src/TensorRT/python/src/parsers/pyOnnx.cpp.rej is:

--- python/src/parsers/pyOnnx.cpp
+++ python/src/parsers/pyOnnx.cpp
@@ -17,7 +17,7 @@

// Implementation of PyBind11 Binding Code for OnnxParser
#include "ForwardDeclarations.h"
-#include "onnxOpenSource/NvOnnxParser.h"
+#include "onnx/NvOnnxParser.h"
#include "parsers/pyOnnxDoc.h"
#include "utils.h"
#include <pybind11/stl.h>
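
The "Reversed (or previously applied)" message suggests that upstream 8.6.1.6 already contains the pyOnnx.cpp change, and the failed CMakeLists.txt hunk may point the same way. A sketch of how to verify this and trim the patch (assuming patchutils provides filterdiff; paths assume the default srcdir layout and are illustrative):

cd src/TensorRT
# hunks that are already applied upstream will reverse cleanly
patch -p1 -R --dry-run < ../../020-tensorrt-fix-python.patch
# cut the obsolete pyOnnx.cpp hunk out of the patch
filterdiff -p1 -x 'python/src/parsers/pyOnnx.cpp' \
    ../../020-tensorrt-fix-python.patch > ../../020-trimmed.patch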

DavTheRaveUK commented on 2023-05-03 13:24 (UTC)

I followed your instructions only to be met with the following error:

==> ERROR: 010-tensorrt-use-local-protobuf-sources.patch was not found in the build directory and is not a URL.

What gives?
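
For what it's worth, makepkg prints that message when a local source file is missing next to the PKGBUILD, and a stale checkout from before the patch was added would cause exactly that. A sketch of what to try, assuming the standard AUR git clone:

cd tensorrt
git pull    # fetch the PKGBUILD revision that ships the patch
ls 010-tensorrt-use-local-protobuf-sources.patch
makepkg -si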

feiticeir0 commented on 2023-03-19 10:23 (UTC)

@dbermond Thank you! It just compiled fine! Now TensorFlow is finally working with the Nvidia GPU.

dbermond commented on 2023-03-18 19:42 (UTC)

@Smoolak @feiticeir0 Package updated. It's building fine for me now. The current version supports CUDA 12.0.

feiticeir0 commented on 2023-03-14 00:42 (UTC) (edited on 2023-03-14 00:46 (UTC) by feiticeir0)

I'm getting the following error:


-- Build files have been written to: /tmp/makepkg/tensorrt/src/build
make: Entering directory '/tmp/makepkg/tensorrt/src/build'
[  2%] Built target third_party.protobuf
[  2%] Built target gen_onnx_proto
[  2%] Built target caffe_proto
[  2%] Built target gen_onnx_operators_proto
[  2%] Built target gen_onnx_data_proto
[  4%] Built target onnx_proto
[  8%] Built target nvcaffeparser_static
[ 12%] Built target nvcaffeparser
[ 17%] Built target nvonnxparser_static
[ 17%] Built target nvonnxparser
make[2]: *** No rule to make target '/opt/cuda/lib/libcudart_static.a', needed by 'libnvinfer_plugin.so.8.5.3'.  Stop.
make[1]: *** [CMakeFiles/Makefile2:1166: plugin/CMakeFiles/nvinfer_plugin.dir/all] Error 2
make[1]: *** Waiting for unfinished jobs....
[ 58%] Built target nvinfer_plugin_static
make: *** [Makefile:156: all] Error 2
make: Leaving directory '/tmp/makepkg/tensorrt/src/build'
==> ERROR: A failure occurred in build().
    Aborting...

I've downgraded CUDA and cuDNN to match the TensorRT version, but now this is happening... Any hints?

/opt/cuda/lib

does not exist, but instead there is lib64.

Even if I create a symlink to lib, it does not work... Thank you
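
For reference, a minimal sketch of the checks that narrow this down (paths vary across cuda package versions, so treat them as illustrative):

# find where the static runtime actually lives
find /opt/cuda -name 'libcudart_static.a'
# a stale CMake cache can keep referencing the old lib/ path;
# wiping the build directory forces CMake to re-detect it
rm -rf src/build
makepkg -ef    # -e reuses the existing srcdir, -f overwrites the package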

Smoolak commented on 2023-03-10 17:20 (UTC)

Oh ok. Looks like I've been very unlucky and decided to install this on the wrong day. CUDA was just updated in the repos; yesterday's version works fine. I downgraded my system using the Arch Linux Archive.
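
A minimal sketch of that downgrade (the package filename below is illustrative; pick the exact version and date from the archive):

# install yesterday's cuda build straight from the Arch Linux Archive
sudo pacman -U https://archive.archlinux.org/packages/c/cuda/cuda-12.0.0-2-x86_64.pkg.tar.zst
# then hold it back until the build works with the newer cuda:
# add 'IgnorePkg = cuda' to /etc/pacman.conf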

Smoolak commented on 2023-03-10 16:48 (UTC)

I'm getting this error:

[ 24%] Building CXX object plugin/CMakeFiles/nvinfer_plugin.dir/bertQKVToContextPlugin/qkvToContextPlugin.cpp.o
[ 24%] Building CXX object plugin/CMakeFiles/nvinfer_plugin.dir/embLayerNormPlugin/embLayerNormPlugin.cpp.o
[ 24%] Building CXX object plugin/CMakeFiles/nvinfer_plugin.dir/embLayerNormPlugin/embLayerNormVarSeqlenPlugin.cpp.o
[ 24%] Building CXX object plugin/CMakeFiles/nvinfer_plugin.dir/fcPlugin/fcPlugin.cpp.o
In file included from /home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/common/bertCommon.h:26,
                 from /home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/fcPlugin/fcPlugin.h:27,
                 from /home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/fcPlugin/fcPlugin.cpp:24:
/home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/fcPlugin/fcPlugin.h: In member function ‘void nvinfer1::plugin::bert::AlgoProps::populate(const cublasLtMatmulAlgo_t&)’:
/home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/fcPlugin/fcPlugin.h:379:25: error: ‘CUBLASLT_ALGO_CAP_MATHMODE_IMPL’ was not declared in this scope; did you mean ‘CUBLASLT_ALGO_CAP_TILE_IDS’?
  379 |             matmulAlgo, CUBLASLT_ALGO_CAP_MATHMODE_IMPL, &mathMode, sizeof(mathMode), nullptr));
      |                         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/common/checkMacrosPlugin.h:215:19: note: in definition of macro ‘PLUGIN_CUBLASASSERT’
  215 |         auto s_ = status_;                                                                                             \
      |                   ^~~~~~~
/home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/fcPlugin/fcPlugin.cpp: In function ‘void LtGemmSearch(cublasLtHandle_t, cublasOperation_t, cublasOperation_t, const int&, const int&, const int&, const void*, const void*, const int&, const void*, const int&, const void*, void*, const int&, void*, size_t, cublasComputeType_t, cudaDataType_t, cudaDataType_t, cudaDataType_t, cudaDataType_t, std::vector<nvinfer1::plugin::bert::customMatMultPerfType_t>&)’:
/home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/fcPlugin/fcPlugin.cpp:171:21: error: ‘CUBLASLT_MATMUL_PREF_MATH_MODE_MASK’ was not declared in this scope; did you mean ‘CUBLASLT_MATMUL_PREF_IMPL_MASK’?
  171 |         preference, CUBLASLT_MATMUL_PREF_MATH_MODE_MASK, &mathMode, sizeof(mathMode)));
      |                     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/common/checkMacrosPlugin.h:215:19: note: in definition of macro ‘PLUGIN_CUBLASASSERT’
  215 |         auto s_ = status_;                                                                                             \
      |                   ^~~~~~~
/home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/fcPlugin/fcPlugin.cpp:218:20: error: ‘CUBLASLT_ALGO_CAP_MATHMODE_IMPL’ was not declared in this scope; did you mean ‘CUBLASLT_ALGO_CAP_TILE_IDS’?
  218 |             &algo, CUBLASLT_ALGO_CAP_MATHMODE_IMPL, &mathMode, sizeof(mathMode), nullptr));
      |                    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/smoolak/.cache/yay/tensorrt/src/TensorRT/plugin/common/checkMacrosPlugin.h:215:19: note: in definition of macro ‘PLUGIN_CUBLASASSERT’
  215 |         auto s_ = status_;                                                                                             \
      |                   ^~~~~~~
make[2]: *** [plugin/CMakeFiles/nvinfer_plugin.dir/build.make:2260: plugin/CMakeFiles/nvinfer_plugin.dir/fcPlugin/fcPlugin.cpp.o] Error 1
make[1]: *** [CMakeFiles/Makefile2:1166: plugin/CMakeFiles/nvinfer_plugin.dir/all] Error 2
make: *** [Makefile:156: all] Error 2
make: Leaving directory '/home/smoolak/.cache/yay/tensorrt/src/build'
==> ERROR: A failure occurred in build().
    Aborting...
 -> error making: tensorrt