summarylogtreecommitdiffstats
diff options
context:
space:
mode:
authorlilac2021-11-06 22:45:28 +0800
committerlilac2021-11-06 22:45:28 +0800
commit705252fad4d442b7f225516146a149e417090d11 (patch)
tree0e450b8cc2810e2d8b7bec8eb0904cd4408b5e37
parent680d5843d571b57e6d8ea102e8e9ea2031738b34 (diff)
downloadaur-705252fad4d442b7f225516146a149e417090d11.tar.gz
[lilac] updated to 1.9.1-4
-rw-r--r--.SRCINFO20
-rw-r--r--PKGBUILD143
-rw-r--r--notes.txt13
-rw-r--r--system-dnnl.diff40
4 files changed, 110 insertions, 106 deletions
diff --git a/.SRCINFO b/.SRCINFO
index 924dd0acc346..54884c7d0929 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,20 +1,10 @@
pkgbase = python-onnxruntime
pkgdesc = Cross-platform, high performance scoring engine for ML models
pkgver = 1.9.1
- pkgrel = 1
+ pkgrel = 4
url = https://github.com/microsoft/onnxruntime
arch = x86_64
license = MIT
- checkdepends = python-pytest
- checkdepends = python-pytorch
- checkdepends = python-h5py
- checkdepends = python-pandas
- checkdepends = python-psutil
- checkdepends = python-tqdm
- checkdepends = python-sympy
- checkdepends = python-torchvision
- checkdepends = tensorboard
- checkdepends = python-cerberus
makedepends = git
makedepends = cmake
makedepends = gtest
@@ -37,6 +27,7 @@ pkgbase = python-onnxruntime
depends = python-onnx
depends = python-protobuf
depends = openmpi
+ depends = onednn
options = !lto
source = git+https://github.com/microsoft/onnxruntime#tag=v1.9.1
source = git+https://github.com/onnx/onnx.git
@@ -48,6 +39,7 @@ pkgbase = python-onnxruntime
source = pytorch_cpuinfo::git+https://github.com/pytorch/cpuinfo.git
source = build-fixes.patch
source = clang.patch
+ source = system-dnnl.diff
sha512sums = SKIP
sha512sums = SKIP
sha512sums = SKIP
@@ -58,10 +50,12 @@ pkgbase = python-onnxruntime
sha512sums = SKIP
sha512sums = 685f0235abed6e1277dd0eb9bda56c464d1987fe7fc90a3550e17ec70cc49fd15f34996a0e159f9622c4ca3e6bf29917fe51b7849342531fa2a6808d782f1e06
sha512sums = ad94af8bb25744b244c4f82e9a06189741f82b295a88523ca0e8005568fac710c2299d783989457e9cf96ef8da0593fb4f70c8792d416f44ab29d6493e204f13
+ sha512sums = 6735c7aca2ba2f1f2a5286eb064125bf7f2c68a575d572dd157769d15778ff3e717b3a53d696c767748229f23ee6c3a7c82679df1d86283d7c4dd0ec9103ae08
pkgname = python-onnxruntime
pkgname = python-onnxruntime-cuda
+ pkgdesc = Cross-platform, high performance scoring engine for ML models (CUDA execution provider)
depends = nsync
depends = re2
depends = python-flatbuffers
@@ -69,8 +63,8 @@ pkgname = python-onnxruntime-cuda
depends = python-onnx
depends = python-protobuf
depends = openmpi
+ depends = onednn
depends = cuda
depends = cudnn
depends = nccl
- provides = python-onnxruntime=1.9.1
- conflicts = python-onnxruntime
+ depends = python-onnxruntime
diff --git a/PKGBUILD b/PKGBUILD
index b7be61267c75..631cd223a318 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -1,16 +1,17 @@
# Maintainer: Chih-Hsuan Yen <yan12125@gmail.com>
pkgbase=python-onnxruntime
+# Do not split the DNNL EP into a separate package, as it is needed unconditionally at runtime if enabled at compile time
+# https://github.com/microsoft/onnxruntime/blob/v1.9.1/onnxruntime/python/onnxruntime_pybind_state.cc#L533
pkgname=(python-onnxruntime python-onnxruntime-cuda)
pkgver=1.9.1
pkgdesc='Cross-platform, high performance scoring engine for ML models'
-pkgrel=1
+pkgrel=4
arch=(x86_64)
url='https://github.com/microsoft/onnxruntime'
license=(MIT)
-depends=(nsync re2 python-flatbuffers python-numpy python-onnx python-protobuf openmpi)
+depends=(nsync re2 python-flatbuffers python-numpy python-onnx python-protobuf openmpi onednn)
makedepends=(git cmake gtest gmock pybind11 python-setuptools nlohmann-json chrono-date boost eigen flatbuffers cuda cudnn nccl clang)
-checkdepends=(python-pytest python-pytorch python-h5py python-pandas python-psutil python-tqdm python-sympy python-torchvision tensorboard python-cerberus)
# not de-vendored libraries
# onnx: needs shared libonnx (https://github.com/onnx/onnx/issues/3030)
source=("git+https://github.com/microsoft/onnxruntime#tag=v$pkgver"
@@ -22,7 +23,8 @@ source=("git+https://github.com/microsoft/onnxruntime#tag=v$pkgver"
"git+https://github.com/jarro2783/cxxopts.git"
"pytorch_cpuinfo::git+https://github.com/pytorch/cpuinfo.git"
build-fixes.patch
- clang.patch)
+ clang.patch
+ system-dnnl.diff)
sha512sums=('SKIP'
'SKIP'
'SKIP'
@@ -32,7 +34,8 @@ sha512sums=('SKIP'
'SKIP'
'SKIP'
'685f0235abed6e1277dd0eb9bda56c464d1987fe7fc90a3550e17ec70cc49fd15f34996a0e159f9622c4ca3e6bf29917fe51b7849342531fa2a6808d782f1e06'
- 'ad94af8bb25744b244c4f82e9a06189741f82b295a88523ca0e8005568fac710c2299d783989457e9cf96ef8da0593fb4f70c8792d416f44ab29d6493e204f13')
+ 'ad94af8bb25744b244c4f82e9a06189741f82b295a88523ca0e8005568fac710c2299d783989457e9cf96ef8da0593fb4f70c8792d416f44ab29d6493e204f13'
+ '6735c7aca2ba2f1f2a5286eb064125bf7f2c68a575d572dd157769d15778ff3e717b3a53d696c767748229f23ee6c3a7c82679df1d86283d7c4dd0ec9103ae08')
# CUDA seems not working with LTO
options+=('!lto')
@@ -44,10 +47,7 @@ prepare() {
patch -Np1 -i ../build-fixes.patch
patch -Np1 -i ../clang.patch
-
- # 1.9.0 is marked as 1.10.0 https://github.com/microsoft/onnxruntime/blob/v1.9.0/VERSION_NUMBER
- # Official wheels are not affected, though
- echo $pkgver > VERSION_NUMBER
+ patch -Np1 -i ../system-dnnl.diff
git submodule init
for mod in onnx SafeInt optional-lite tensorboard dlpack cxxopts pytorch_cpuinfo; do
@@ -56,82 +56,60 @@ prepare() {
done
}
-_build() {
- build_dir=$1
- shift
-
+build() {
cd "$srcdir"/onnxruntime
+
+ local cmake_args=(
+ -DCMAKE_INSTALL_PREFIX=/usr
+ -Donnxruntime_ENABLE_PYTHON=ON
+ -Donnxruntime_PREFER_SYSTEM_LIB=ON
+ -Donnxruntime_BUILD_SHARED_LIB=ON
+ -Donnxruntime_ENABLE_TRAINING=ON
+ -Donnxruntime_USE_MPI=ON
+ -Donnxruntime_USE_PREINSTALLED_EIGEN=ON
+ -Donnxruntime_USE_DNNL=ON
+ -Deigen_SOURCE_PATH=/usr/include/eigen3
+ )
+
  # Use protobuf-lite instead of full protobuf to work around symbol conflicts
  # with onnx; see https://github.com/onnx/onnx/issues/1277 for details.
- CC=/usr/bin/clang CXX=/usr/bin/clang++ \
- cmake -B $build_dir -S cmake \
- -DCMAKE_INSTALL_PREFIX=/usr \
- -Donnxruntime_ENABLE_PYTHON=ON \
- -DONNX_CUSTOM_PROTOC_EXECUTABLE=/usr/bin/protoc \
- -Donnxruntime_PREFER_SYSTEM_LIB=ON \
- -Donnxruntime_USE_FULL_PROTOBUF=OFF \
- -Donnxruntime_BUILD_SHARED_LIB=ON \
- -Donnxruntime_ENABLE_TRAINING=ON \
- -Donnxruntime_USE_MPI=ON \
- -Donnxruntime_USE_PREINSTALLED_EIGEN=ON \
- -Deigen_SOURCE_PATH=/usr/include/eigen3 \
- "$@"
-
- cd $build_dir
- make
- python ../setup.py build
-}
+ cmake_args+=(
+ -DONNX_CUSTOM_PROTOC_EXECUTABLE=/usr/bin/protoc
+ -Donnxruntime_USE_FULL_PROTOBUF=OFF
+ )
-build() {
- _build build
-
- # 1. Use clang as GCC does not work. GCC 11 crashes with internal
- # compiler errors. GCC 10 does not work as some dependent packages
- # (ex: re2) are built with libstdc++ from GCC 11, and thus linking
- # onnxruntime with libstdc++ 10 fails.
- # 2. Redefine ___is_signed to ___is_signed to workaround a regression
+  # 1. Redefine __is_signed to ___is_signed to work around a regression
   # from CUDA 11.3 -> 11.3.1 [1].
+ # 2. Enable parallel builds for NVCC via -t0, which spawns multiple
+ # cicc and ptxas processes for each nvcc invocation. The number of
+ # total processes may be much larger than the number of cores - let
+ # the scheduler handle it.
# [1] https://forums.developer.nvidia.com/t/182176
- _build build-cuda \
- -DCMAKE_CUDA_HOST_COMPILER=/usr/bin/clang \
- -DCMAKE_CUDA_FLAGS="-D__is_signed=___is_signed" \
- -DCMAKE_CUDA_ARCHITECTURES="$_CUDA_ARCHITECTURES" \
- -Donnxruntime_USE_CUDA=ON \
- -Donnxruntime_CUDA_HOME=/opt/cuda \
- -Donnxruntime_CUDNN_HOME=/usr \
+ cmake_args+=(
+ -DCMAKE_CUDA_HOST_COMPILER=/usr/bin/clang
+ -DCMAKE_CUDA_FLAGS="-D__is_signed=___is_signed -t0"
+ -DCMAKE_CUDA_ARCHITECTURES="$_CUDA_ARCHITECTURES"
+ -Donnxruntime_USE_CUDA=ON
+ -Donnxruntime_CUDA_HOME=/opt/cuda
+ -Donnxruntime_CUDNN_HOME=/usr
-Donnxruntime_USE_NCCL=ON
-}
+ )
-_check() {
- # Test models are no longer publicly available [1]
- # [1] https://github.com/microsoft/onnxruntime/issues/7447
- GTEST_FILTER='-*ModelTest*' ARGS="--rerun-failed --output-on-failure" make test
- # launch_test.py seems a script, and orttraining_* include BERT tests, which require the
- # transformers package, and failed even if the latter is installed.
-
- # XXX: Some python tests failed (ex: [1]). In those tests, tested ONNX models are
- # generated on the fly using the onnx python library (ex: [2]). When the latter
- # is newer than the included onnx submodule, loading a tested ONNX model may fail
- # as the IR version for a tested model may be higher than the IR version used in
- # onnxruntime.
- # [1] https://build.archlinuxcn.org/~imlonghao/log/python-onnxruntime/2021-08-07T12%3A17%3A01.html
- # [2] https://github.com/microsoft/onnxruntime/blob/v1.8.2/onnxruntime/test/python/quantization/test_op_gemm.py#L28-L76
- LD_LIBRARY_PATH="$PWD" pytest \
- --ignore launch_test.py \
- --ignore orttraining_run_bert_pretrain.py \
- --ignore orttraining_run_frontend_batch_size_test.py \
- --ignore transformers
-}
-
-check() {
- cd "$srcdir"/onnxruntime/build
- _check
+ # Use clang as GCC does not work. GCC 11 crashes with internal
+ # compiler errors. GCC 10 does not work as some dependent packages
+ # (ex: re2) are built with libstdc++ from GCC 11, and thus linking
+ # onnxruntime with libstdc++ 10 fails.
+ CC=/usr/bin/clang CXX=/usr/bin/clang++ \
+ cmake -B build -S cmake "${cmake_args[@]}" "$@"
- cd "$srcdir"/onnxruntime/build-cuda
- # _check # requires machines with CUDA-compatible devices
+ cd build
+ make
+ python ../setup.py build
}
-_package() {
+package_python-onnxruntime() {
+ cd onnxruntime/build
+
make install DESTDIR="$pkgdir"
python ../setup.py install --root="$pkgdir" --skip-build --optimize=1
@@ -143,18 +121,17 @@ _package() {
done
# already installed by `make install`, and not useful as this path is not looked up by the linker
rm -vf "$pkgdir/$PY_ORT_DIR"/capi/libonnxruntime_providers_*
-}
-package_python-onnxruntime() {
- cd onnxruntime/build
- _package
+ # installed as split packages
+ rm -vf "$pkgdir"/usr/lib/libonnxruntime_providers_cuda.so
}
package_python-onnxruntime-cuda() {
- depends+=(cuda cudnn nccl)
- conflicts=(python-onnxruntime)
- provides=("python-onnxruntime=$pkgver")
+ depends+=(cuda cudnn nccl python-onnxruntime)
+ pkgdesc+=' (CUDA execution provider)'
- cd onnxruntime/build-cuda
- _package
+ cd onnxruntime/build
+ install -Dm755 libonnxruntime_providers_cuda.so -t "$pkgdir"/usr/lib
+ install -Ddm755 "$pkgdir"/usr/share/licenses
+ ln -s python-onnxruntime "$pkgdir"/usr/share/licenses/$pkgname
}
diff --git a/notes.txt b/notes.txt
index cc19868adbdb..62efe9cd463b 100644
--- a/notes.txt
+++ b/notes.txt
@@ -1,10 +1,3 @@
-Merging two builds?
- Since 1.9.0, "GPU package can be used on both CPU-only and GPU machines" [1]
- Package size considerations: make libonnxruntime_providers_cuda.so a separate package
- There will be warnings from the CUDA build on non-CUDA machines
- /usr/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py:352: UserWarning: Deprecation warning. This ORT build has ['CUDAExecutionProvider', 'CPUExecutionProvider'] enabled. The next release (ORT 1.10) will require explicitly setting the providers parameter (as opposed to the current behavior of providers getting set/registered by default based on the build flags) when instantiating InferenceSession.For example, onnxruntime.InferenceSession(..., providers=["CUDAExecutionProvider"], ...)
- warnings.warn("Deprecation warning. This ORT build has {} enabled. ".format(available_providers) +
- 2021-09-23 23:47:08.573828136 [E:onnxruntime:Default, provider_bridge_ort.cc:944 Get] Failed to load library libonnxruntime_providers_cuda.so with error: libcublas.so.11: cannot open shared object file: No such file or directory
- 2021-09-23 23:47:08.574000870 [E:onnxruntime:Default, provider_bridge_ort.cc:944 Get] Failed to load library libonnxruntime_providers_cuda.so with error: libcublas.so.11: cannot open shared object file: No such file or directory
-
- [1] https://github.com/microsoft/onnxruntime/releases/tag/v1.9.0
+Build system changes in 1.10
+ Build: respect onnxruntime_PREFER_SYSTEM_LIB for more things https://github.com/microsoft/onnxruntime/pull/9181
+ Remove optional-lite https://github.com/microsoft/onnxruntime/pull/9424
diff --git a/system-dnnl.diff b/system-dnnl.diff
new file mode 100644
index 000000000000..1444d8acaa1e
--- /dev/null
+++ b/system-dnnl.diff
@@ -0,0 +1,40 @@
+diff --git a/cmake/external/dnnl.cmake b/cmake/external/dnnl.cmake
+index 6a51a3d5d..a89635210 100644
+--- a/cmake/external/dnnl.cmake
++++ b/cmake/external/dnnl.cmake
+@@ -26,6 +26,13 @@ elseif(onnxruntime_USE_DNNL AND onnxruntime_DNNL_GPU_RUNTIME STREQUAL "ocl" AND
+ endif()
+
+ if (onnxruntime_USE_DNNL)
++if (onnxruntime_PREFER_SYSTEM_LIB)
++ # https://oneapi-src.github.io/oneDNN/dev_guide_transition_to_dnnl.html
++ find_package(dnnl CONFIG REQUIRED)
++ add_library(project_dnnl INTERFACE)
++ add_library(dnnl INTERFACE)
++ target_link_libraries(dnnl INTERFACE DNNL::dnnl)
++else ()
+ set(DNNL_SOURCE ${CMAKE_CURRENT_BINARY_DIR}/dnnl/src/dnnl/src)
+ set(DNNL_INSTALL ${CMAKE_CURRENT_BINARY_DIR}/dnnl/install)
+ set(DNNL_LIB_DIR ${DNNL_INSTALL}/${CMAKE_INSTALL_LIBDIR})
+@@ -55,3 +62,4 @@ if (onnxruntime_USE_DNNL)
+ )
+ link_directories(${DNNL_LIB_DIR})
+ endif()
++endif()
+diff --git a/cmake/onnxruntime_unittests.cmake b/cmake/onnxruntime_unittests.cmake
+index 6bdb2d03c..514faa375 100644
+--- a/cmake/onnxruntime_unittests.cmake
++++ b/cmake/onnxruntime_unittests.cmake
+@@ -744,10 +744,12 @@ add_custom_command(
+ if (NOT onnxruntime_ENABLE_TRAINING_TORCH_INTEROP)
+ if (onnxruntime_USE_DNNL)
+ list(APPEND onnx_test_libs dnnl)
++ if (NOT onnxruntime_PREFER_SYSTEM_LIB)
+ add_custom_command(
+ TARGET ${test_data_target} POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy ${DNNL_DLL_PATH} $<TARGET_FILE_DIR:${test_data_target}>
+ )
++ endif()
+ endif()
+ if(WIN32)
+ if (onnxruntime_USE_TVM)