Diffstat (limited to 'PKGBUILD')
-rw-r--r--  PKGBUILD  381
1 file changed, 290 insertions(+), 91 deletions(-)
diff --git a/PKGBUILD b/PKGBUILD
index 631cd223a318..87bc82829791 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -1,137 +1,336 @@
-# Maintainer: Chih-Hsuan Yen <yan12125@gmail.com>
+# Maintainer: Gustavo Alvarez <sl1pkn07@gmail.com>
+# Contributor: Chih-Hsuan Yen <yan12125@gmail.com>
+
+# MAKEFLAGS=-j2 # NOTE: limiting parallel jobs can help when the build runs out of memory (OOM)
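+# (Sketch of the intent: uncomment the line above to cap parallel compile jobs;
+# note that ninja, used for the main build below, does not read MAKEFLAGS, so
+# this mainly affects any make-based sub-builds.)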
+
+_ENABLE_CUDA=1
+_ENABLE_TENSORRT=0 # NOTE: not working due to https://github.com/microsoft/onnxruntime/issues/15131
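+# Build-time switches: edit them here before running makepkg; when set to 1 the
+# corresponding split package and extra makedepends are appended below.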
pkgbase=python-onnxruntime
# Do not split the DNNL EP into a separate package, as it is needed unconditionally at runtime if built at compile time
# https://github.com/microsoft/onnxruntime/blob/v1.9.1/onnxruntime/python/onnxruntime_pybind_state.cc#L533
-pkgname=(python-onnxruntime python-onnxruntime-cuda)
-pkgver=1.9.1
+pkgname=(
+ 'onnxruntime'
+ 'python-onnxruntime'
+)
+pkgver=1.16.3
pkgdesc='Cross-platform, high performance scoring engine for ML models'
-pkgrel=4
-arch=(x86_64)
+pkgrel=1
+arch=('x86_64')
url='https://github.com/microsoft/onnxruntime'
-license=(MIT)
-depends=(nsync re2 python-flatbuffers python-numpy python-onnx python-protobuf openmpi onednn)
-makedepends=(git cmake gtest gmock pybind11 python-setuptools nlohmann-json chrono-date boost eigen flatbuffers cuda cudnn nccl clang)
+license=('MIT')
+makedepends=(
+ 'git'
+ 'cmake'
+ 'ninja'
+ 'gcc-libs'
+ 'glibc'
+ 'cxxopts'
+ 'pybind11'
+ 'abseil-cpp'
+ 'nlohmann-json'
+ 'chrono-date'
+ 'boost'
+ 'eigen'
+# 'flatbuffers'
+ 'onednn'
+# 're2'
+# 'protobuf'
+ 'nsync'
+ 'openmpi'
+ 'python-coloredlogs'
+ 'python-flatbuffers'
+ 'python-numpy'
+# 'python-protobuf'
+ 'python-sympy'
+ 'python-setuptools'
+ 'python-installer'
+ 'python-wheel'
+ 'python-build'
+
+ 'chrpath'
+)
# not de-vendored libraries
# onnx: needs shared libonnx (https://github.com/onnx/onnx/issues/3030)
-source=("git+https://github.com/microsoft/onnxruntime#tag=v$pkgver"
- "git+https://github.com/onnx/onnx.git"
- "git+https://github.com/dcleblanc/SafeInt.git"
- "git+https://github.com/martinmoene/optional-lite.git"
- "git+https://github.com/tensorflow/tensorboard.git"
- "git+https://github.com/dmlc/dlpack.git"
- "git+https://github.com/jarro2783/cxxopts.git"
- "pytorch_cpuinfo::git+https://github.com/pytorch/cpuinfo.git"
- build-fixes.patch
- clang.patch
- system-dnnl.diff)
-sha512sums=('SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- 'SKIP'
- '685f0235abed6e1277dd0eb9bda56c464d1987fe7fc90a3550e17ec70cc49fd15f34996a0e159f9622c4ca3e6bf29917fe51b7849342531fa2a6808d782f1e06'
- 'ad94af8bb25744b244c4f82e9a06189741f82b295a88523ca0e8005568fac710c2299d783989457e9cf96ef8da0593fb4f70c8792d416f44ab29d6493e204f13'
- '6735c7aca2ba2f1f2a5286eb064125bf7f2c68a575d572dd157769d15778ff3e717b3a53d696c767748229f23ee6c3a7c82679df1d86283d7c4dd0ec9103ae08')
-# CUDA seems not working with LTO
-options+=('!lto')
+source=(
+ "git+https://github.com/microsoft/onnxruntime#tag=v${pkgver}"
+ 'install-orttraining-files.diff'
+ 'system-dnnl.diff'
+# 'system-flatbuffers.patch'
+)
+sha512sums=(
+ 'SKIP'
+ 'SKIP'
+ 'SKIP'
+# 'SKIP'
+)
+options=('debug')
+
+if [[ $_ENABLE_CUDA = 1 ]]; then
+ pkgname+=('onnxruntime-cuda')
+ makedepends+=(
+ 'cuda'
+ 'cudnn'
+ 'nccl'
+ )
+fi
+
+if [[ $_ENABLE_TENSORRT = 1 ]]; then
+ pkgname+=('onnxruntime-tensorrt')
+ makedepends+=('tensorrt')
+# depends+=('protobuf' 'libprotobuf.so')
+fi
# Check PKGBUILDs of python-pytorch and tensorflow for CUDA architectures built by official packages
-_CUDA_ARCHITECTURES="52-real;53-real;60-real;61-real;62-real;70-real;72-real;75-real;80-real;86-real;86-virtual"
+_CUDA_ARCHITECTURES="52-real;53-real;60-real;61-real;62-real;70-real;72-real;75-real;80-real;86-real;87-real;89-real;90-real;90-virtual"
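+# In CMAKE_CUDA_ARCHITECTURES an "NN-real" entry emits device code (SASS) for
+# that architecture only, while "NN-virtual" emits PTX only; the trailing
+# "90-virtual" keeps newer GPUs working through JIT compilation.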
prepare() {
cd onnxruntime
- patch -Np1 -i ../build-fixes.patch
- patch -Np1 -i ../clang.patch
- patch -Np1 -i ../system-dnnl.diff
+  # Use system DNNL (oneDNN)
+ patch -Np1 -i "${srcdir}/system-dnnl.diff"
+
+ # Find system nlohmann-json
+ sed 's|3.10 ||g' \
+ -i cmake/external/onnxruntime_external_deps.cmake
+
+ # Find system chrono-date
+ sed -e 's|${DEP_SHA1_date}|&\n \ \ \ \ \ \FIND_PACKAGE_ARGS NAMES date|g' \
+ -e 's|date_interface|date::date-tz|g' \
+ -i cmake/external/onnxruntime_external_deps.cmake \
+ -i cmake/onnxruntime_common.cmake \
+ -i cmake/onnxruntime_unittests.cmake
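+  # Sketch of the intent: the FetchContent_Declare() call gains a
+  # "FIND_PACKAGE_ARGS NAMES <pkg>" clause, so CMake (>= 3.24) tries
+  # find_package() for the system library before falling back to downloading
+  # the pinned source; the same pattern is applied to abseil-cpp and cxxopts below.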
+
+ # Find system abseil-cpp
+ sed 's|ABSL_PATCH_COMMAND}|&\n\ \ \ \ \FIND_PACKAGE_ARGS NAMES absl|g' \
+ -i cmake/external/abseil-cpp.cmake
+
+ # Find system cxxopts
+ sed 's|${DEP_SHA1_cxxopts}|&\n\ \ \ \ \FIND_PACKAGE_ARGS NAMES cxxopts|g' \
+ -i cmake/external/onnxruntime_external_deps.cmake
+
+# # Find system mimalloc
+# sed 's|${DEP_SHA1_mimalloc}|&\n\ \ \ \ \ \ \FIND_PACKAGE_ARGS NAMES mimalloc|g' \
+# -i cmake/external/onnxruntime_external_deps.cmake
+
+ # Find system nsync
+ sed -e 's|NAMES nsync|&_cpp|g' \
+ -e '295aadd_library(nsync::nsync_cpp ALIAS nsync_cpp)' \
+ -i cmake/external/onnxruntime_external_deps.cmake
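+  # Sketch of the intent: the system library is looked up as nsync_cpp, and an
+  # nsync::nsync_cpp alias target is appended so that targets linking
+  # nsync::nsync_cpp still resolve.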
- git submodule init
- for mod in onnx SafeInt optional-lite tensorboard dlpack cxxopts pytorch_cpuinfo; do
- git config submodule.cmake/external/$mod.url "$srcdir"/$mod
- git submodule update cmake/external/$mod
- done
+ if [[ $_ENABLE_TENSORRT = 1 ]]; then
+ # Update Tensorboard 8343cad89d984c199637ead11c8d4c053191673a (2.15.1)
+# # Update Tensorboard a01ceb5957d9ecd56314df115c09e3ddb60d12f7
+# sed -e 's|373eb09e4c5d2b3cc2493f0949dc4be6b6a45e81|a01ceb5957d9ecd56314df115c09e3ddb60d12f7|g' \
+# -e 's|ff427b6a135344d86b65fa2928fbd29886eefaec|113750f323d131859ac4e17070d2c9417e80d701|g' \
+# -i cmake/deps.txt
+
+
+ # Update onnx_tensorrt 6ba67d3428e05f690145373ca87fb8d32f98df45 (8.6 GA)
+ sed -e 's|0462dc31ae78f48744b6141ae376df1f96d3f459|6ba67d3428e05f690145373ca87fb8d32f98df45|g' \
+ -e 's|67b833913605a4f3f499894ab11528a702c2b381|67b833913605a4f3f499894ab11528a702c2b381|g' \
+ -i cmake/deps.txt
+ fi
+
+ patch -Np1 -i "${srcdir}/install-orttraining-files.diff"
+# patch -Np1 -i "${srcdir}/system-flatbuffers.patch"
+
+  # fix build with GCC 12(?); idea taken from https://github.com/microsoft/onnxruntime/pull/11667 and https://github.com/microsoft/onnxruntime/pull/10014
+ sed 's|dims)|TensorShape(dims))|g' \
+ -i onnxruntime/contrib_ops/cuda/quantization/qordered_ops/qordered_qdq.cc
+
+ # fix missing #include <iostream>
+ sed '11a#include <iostream>' \
+ -i orttraining/orttraining/test/training_api/trainer/trainer.cc
+
+# cd onnxruntime/core/flatbuffers/schema
+# python compile_schema.py --flatc /usr/bin/flatc
}
build() {
- cd "$srcdir"/onnxruntime
- local cmake_args=(
+ if [[ ${_ENABLE_CUDA} = 1 ]]; then
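+    # Assumption: /opt/cuda/bin/gcc and g++ are the host-compiler wrappers
+    # shipped with the cuda package (a GCC release supported by nvcc);
+    # CUDAHOSTCXX makes CMake pass the same compiler on to nvcc.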
+ export CC="/opt/cuda/bin/gcc"
+ export CXX="/opt/cuda/bin/g++"
+ export CUDAHOSTCXX="${CXX}"
+ fi
+
+ # Gcc 12+
+ CXXFLAGS+=" -Wno-maybe-uninitialized -Wno-error=restrict"
+ CFLAGS="${CFLAGS/_FORTIFY_SOURCE=2/_FORTIFY_SOURCE=0}"
+ CXXFLAGS="${CXXFLAGS/_FORTIFY_SOURCE=2/_FORTIFY_SOURCE=0}"
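+  # Presumably _FORTIFY_SOURCE is downgraded because parts of this debug build
+  # are compiled without optimisation, where glibc's fortify macros only emit
+  # warnings; treat this as an assumption, not upstream guidance.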
+
+
+  # Use -Donnxruntime_ENABLE_LAZY_TENSOR=OFF as it requires a patched python-pytorch
+ # See: https://github.com/microsoft/onnxruntime/pull/10460 https://github.com/pytorch/pytorch/pulls/wschin
+ local _cmake_args=(
+ -DCMAKE_BUILD_TYPE=Debug
-DCMAKE_INSTALL_PREFIX=/usr
+ -DCMAKE_SKIP_INSTALL_RPATH=OFF
+ -DCMAKE_SKIP_RPATH=OFF
-Donnxruntime_ENABLE_PYTHON=ON
- -Donnxruntime_PREFER_SYSTEM_LIB=ON
-Donnxruntime_BUILD_SHARED_LIB=ON
+ -Donnxruntime_BUILD_UNIT_TESTS=OFF
-Donnxruntime_ENABLE_TRAINING=ON
+ -Donnxruntime_ENABLE_LAZY_TENSOR=OFF
-Donnxruntime_USE_MPI=ON
- -Donnxruntime_USE_PREINSTALLED_EIGEN=ON
-Donnxruntime_USE_DNNL=ON
- -Deigen_SOURCE_PATH=/usr/include/eigen3
+ -Donnxruntime_USE_PREINSTALLED_EIGEN=ON
+ -Deigen_SOURCE_PATH="$(pkg-config --cflags eigen3 | sed 's|-I||g')"
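+    # pkg-config --cflags eigen3 normally prints "-I/usr/include/eigen3";
+    # stripping the leading -I leaves the bare include path that
+    # eigen_SOURCE_PATH expects.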
+ -DCMAKE_CXX_STANDARD=17
+ -DCMAKE_IGNORE_PATH=/usr/lib/cmake/flatbuffers/\;/lib/cmake/flatbuffers/\;/usr/lib/cmake/protobuf/\;/lib/cmake/protobuf/
+ -DBUILD_TESTING=OFF
)
# Use protobuf-lite instead of the full protobuf to work around symbol conflicts
# with onnx; see https://github.com/onnx/onnx/issues/1277 for details.
- cmake_args+=(
- -DONNX_CUSTOM_PROTOC_EXECUTABLE=/usr/bin/protoc
+ _cmake_args+=(
-Donnxruntime_USE_FULL_PROTOBUF=OFF
)
- # 1. Redefine ___is_signed to ___is_signed to workaround a regression
- # from CUDA 11.3 -> 11.3.1 [1].
- # 2. Enable parallel builds for NVCC via -t0, which spawns multiple
- # cicc and ptxas processes for each nvcc invocation. The number of
- # total processes may be much larger than the number of cores - let
- # the scheduler handle it.
- # [1] https://forums.developer.nvidia.com/t/182176
- cmake_args+=(
- -DCMAKE_CUDA_HOST_COMPILER=/usr/bin/clang
- -DCMAKE_CUDA_FLAGS="-D__is_signed=___is_signed -t0"
- -DCMAKE_CUDA_ARCHITECTURES="$_CUDA_ARCHITECTURES"
- -Donnxruntime_USE_CUDA=ON
- -Donnxruntime_CUDA_HOME=/opt/cuda
- -Donnxruntime_CUDNN_HOME=/usr
- -Donnxruntime_USE_NCCL=ON
- )
+ if [[ ${_ENABLE_CUDA} = 1 ]]; then
+ _cmake_args+=(
+ -DCMAKE_CUDA_ARCHITECTURES="${_CUDA_ARCHITECTURES}"
+ -DCMAKE_CUDA_STANDARD_REQUIRED=ON
+ -DCMAKE_CXX_STANDARD_REQUIRED=ON
+ -Donnxruntime_USE_CUDA=ON
+ -Donnxruntime_CUDA_HOME=/opt/cuda
+ -Donnxruntime_CUDNN_HOME=/usr
+ -Donnxruntime_USE_NCCL=ON
+ -Donnxruntime_NVCC_THREADS=1
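+      # Presumably maps to nvcc --threads; 1 keeps per-invocation memory use
+      # low during the CUDA build.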
+ )
+ fi
- # Use clang as GCC does not work. GCC 11 crashes with internal
- # compiler errors. GCC 10 does not work as some dependent packages
- # (ex: re2) are built with libstdc++ from GCC 11, and thus linking
- # onnxruntime with libstdc++ 10 fails.
- CC=/usr/bin/clang CXX=/usr/bin/clang++ \
- cmake -B build -S cmake "${cmake_args[@]}" "$@"
+ if [[ ${_ENABLE_TENSORRT} = 1 ]]; then
+ _cmake_args+=(
+ -Donnxruntime_USE_TENSORRT=ON
+ -Donnxruntime_USE_TENSORRT_BUILTIN_PARSER=ON
+ )
+ fi
+ cmake -S onnxruntime/cmake -B build \
+ "${_cmake_args[@]}" \
+ "$@" \
+ -G Ninja
+
+ LC_ALL=C cmake --build build #-v
+
+ (
cd build
- make
- python ../setup.py build
+ install -Dm644 ../onnxruntime/docs/python/README.rst docs/python/README.rst
+ ln -s ../onnxruntime/setup.py .
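+    # setup.py is symlinked into the build tree so the wheel is assembled from
+    # the directory holding the compiled extension and generated files
+    # (an assumption about why the symlink is needed).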
+ python -m build --wheel --no-isolation
+ )
+
}
-package_python-onnxruntime() {
- cd onnxruntime/build
+package_onnxruntime() {
+ depends=(
+ 'gcc-libs' # libgcc_s.so libstdc++.so
+  'glibc' # ld-linux-x86-64.so libc.so libm.so
+ 'onednn' # libdnnl.so
+ 'openmpi' 'libmpi.so'
+ 'abseil-cpp' # libabsl_hash.so libabsl_raw_hash_set.so libabsl_raw_logging_internal.so libabsl_throw_delegate.so
+ 'nsync' # libnsync_cpp.so
+# 'protobuf' 'libprotobuf-lite.so'
+ )
+ provides=(
+ 'libonnxruntime.so'
+ 'libonnxruntime_providers_shared.so'
+ )
- make install DESTDIR="$pkgdir"
+ DESTDIR="${pkgdir}" cmake --install build
- python ../setup.py install --root="$pkgdir" --skip-build --optimize=1
+ # installed as split packages
+ rm -vf "${pkgdir}/usr/lib/"libonnxruntime_providers_{tensorrt,cuda}.so
- PY_ORT_DIR="$(python -c 'import site; print(site.getsitepackages()[0])')/onnxruntime"
- install -Ddm755 "$pkgdir"/usr/share/licenses/$pkgname
- for f in LICENSE ThirdPartyNotices.txt ; do
- ln -s "$PY_ORT_DIR/$f" "$pkgdir"/usr/share/licenses/$pkgname/$f
- done
- # already installed by `make install`, and not useful as this path is not looked up by the linker
- rm -vf "$pkgdir/$PY_ORT_DIR"/capi/libonnxruntime_providers_*
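+  # chrpath -d drops the RPATH entry from the installed library, which
+  # otherwise (with CMAKE_SKIP_INSTALL_RPATH=OFF) may still point at the build tree.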
+ chrpath -d "${pkgdir}/usr/lib/"libonnxruntime.so.*
- # installed as split packages
- rm -vf "$pkgdir"/usr/lib/libonnxruntime_providers_cuda.so
+ install -Dm644 onnxruntime/LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+ install -Dm644 onnxruntime/ThirdPartyNotices.txt "${pkgdir}/usr/share/licenses/${pkgname}/ThirdPartyNotices.txt"
+}
+
+package_python-onnxruntime() {
+ pkgdesc+=' (Python Bindings)'
+ depends=(
+ 'onnxruntime'
+ 'gcc-libs' # libgcc_s.so libstdc++.so
+ 'glibc' # ld-linux-x86-64.so libc.so libm.so
+ 'abseil-cpp' # libabsl_hash.so libabsl_raw_hash_set.so libabsl_raw_logging_internal.so libabsl_throw_delegate.so
+ 'openmpi' 'libmpi.so'
+ 'nsync' # libnsync_cpp.so
+# 'protobuf' 'libprotobuf-lite.so'
+ 'python-coloredlogs'
+ 'python-flatbuffers'
+ 'python-numpy'
+# 'python-protobuf'
+ 'python-sympy'
+ 'python-packaging'
+ 'python-setuptools'
+ 'python-requests'
+ )
+ optdepends=(
+ # https://github.com/microsoft/onnxruntime/pull/9969
+ 'python-onnx: for the backend API, quantization, orttraining, transformers and various tools'
+ 'python-psutil: for transformers'
+ 'python-py-cpuinfo: for transformers'
+ 'python-py3nvml: for transformers'
+ 'python-transformers: for transformers'
+ 'python-scipy: for transformers and various tools'
+ 'python-pytorch: for transformers, orttraining and various tools'
+ 'python-pytorch-cuda'
+ 'python-cerberus: for orttraining'
+ 'python-h5py: for orttraining'
+ 'python-matplotlib'
+ 'python-tensorflow-opt-cuda'
+ 'python-importlib-metadata'
+ )
+
+ python -m installer --destdir="${pkgdir}" build/dist/*.whl
+
+ _PY_ORT_DIR="$(python -c 'import site; print(site.getsitepackages()[0])')/onnxruntime"
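+  # _PY_ORT_DIR resolves to onnxruntime's site-packages directory,
+  # e.g. /usr/lib/pythonX.Y/site-packages/onnxruntime.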
+ # already installed by `cmake --install`, and not useful as this path is not looked up by the linker
+ rm -vf "${pkgdir}/${_PY_ORT_DIR}"/capi/libonnxruntime_providers_*
+ chrpath -d "${pkgdir}/${_PY_ORT_DIR}/capi/onnxruntime_pybind11_state.so"
+
+ install -Ddm755 "${pkgdir}/usr/share/licenses"
+ ln -s onnxruntime "${pkgdir}/usr/share/licenses/${pkgname}"
}
-package_python-onnxruntime-cuda() {
- depends+=(cuda cudnn nccl python-onnxruntime)
+package_onnxruntime-cuda() {
pkgdesc+=' (CUDA execution provider)'
+ depends=(
+ 'gcc-libs' # libgcc_s.so libstdc++.so
+ 'glibc' # ld-linux-x86-64.so libc.so libm.so
+ 'cudnn' # libcudnn.so
+ 'nccl' # libnccl.so
+ 'openmpi' 'libmpi.so'
+ 'nsync' # libnsync_cpp.so
+ 'abseil-cpp' # libabsl_hash.so libabsl_raw_hash_set.so libabsl_raw_logging_internal.so libabsl_throw_delegate.so
+ 'cuda' 'libcublas.so' 'libcudart.so' # libcublasLt.so libcufft.so
+# 'protobuf' 'libprotobuf-lite.so'
+ )
+ conflicts=('python-onnxruntime-cuda')
+ replaces=('python-onnxruntime-cuda')
+
+ install -Dm755 build/libonnxruntime_providers_cuda.so -t "${pkgdir}/usr/lib"
+
+ install -Ddm755 "${pkgdir}/usr/share/licenses"
+ ln -s onnxruntime "${pkgdir}/usr/share/licenses/${pkgname}"
+}
+
+package_onnxruntime-tensorrt() {
+ pkgdesc+=' (TensorRT execution provider)'
+ depends=(
+ 'tensorrt'
+# 'protobuf' 'libprotobuf-lite.so'
+ 'nsync'
+# 'flatbuffers'
+ )
+
+ install -Dm755 build/libonnxruntime_providers_tensorrt.so -t "${pkgdir}/usr/lib"
- cd onnxruntime/build
- install -Dm755 libonnxruntime_providers_cuda.so -t "$pkgdir"/usr/lib
- install -Ddm755 "$pkgdir"/usr/share/licenses
- ln -s python-onnxruntime "$pkgdir"/usr/share/licenses/$pkgname
+ install -Ddm755 "${pkgdir}/usr/share/licenses"
+ ln -s onnxruntime "${pkgdir}/usr/share/licenses/${pkgname}"
}