# Maintainer: Chih-Hsuan Yen <yan12125@gmail.com>
# Split package: python-onnxruntime (CPU-only) and python-onnxruntime-cuda are
# built from the same sources; see the package_*() functions below.
pkgbase=python-onnxruntime
pkgname=(python-onnxruntime python-onnxruntime-cuda)
pkgver=1.8.0
pkgdesc='Cross-platform, high performance scoring engine for ML models'
pkgrel=1
arch=(x86_64)
url='https://github.com/microsoft/onnxruntime'
license=(MIT)
depends=(nsync re2 python-flatbuffers python-numpy python-onnx python-protobuf openmpi)
# clang is used for the CUDA build; see the comment in build() for why GCC fails.
makedepends=(git cmake gtest gmock pybind11 python-setuptools nlohmann-json chrono-date boost flatbuffers cuda cudnn nccl clang)
checkdepends=(python-pytest python-pytorch python-h5py python-pandas python-psutil python-tqdm python-sympy python-torchvision tensorboard python-cerberus)
# not de-vendored libraries
# eigen: API changes a lot since extra/eigen 3.3.7 to the commit onnxruntime uses
# onnx: needs shared libonnx (https://github.com/onnx/onnx/issues/3030)
# https://github.com/microsoft/onnxruntime/blob/v1.1.2/onnxruntime/core/protobuf/onnx-ml.proto#L250-L251
# The extra git repositories are the not-de-vendored submodules; prepare()
# redirects the vendored submodule URLs to these local clones.
source=("git+https://github.com/microsoft/onnxruntime#tag=v$pkgver"
"git+https://gitlab.com/libeigen/eigen.git"
"git+https://github.com/onnx/onnx.git"
"git+https://github.com/dcleblanc/SafeInt.git"
"git+https://github.com/martinmoene/optional-lite.git"
"git+https://github.com/tensorflow/tensorboard.git"
"git+https://github.com/dmlc/dlpack.git"
"git+https://github.com/jarro2783/cxxopts.git"
build-fixes.patch
clang.patch)
# git sources are pinned by tag/commit, hence SKIP; only the two local patches
# carry real checksums.
sha512sums=('SKIP'
'SKIP'
'SKIP'
'SKIP'
'SKIP'
'SKIP'
'SKIP'
'SKIP'
'685f0235abed6e1277dd0eb9bda56c464d1987fe7fc90a3550e17ec70cc49fd15f34996a0e159f9622c4ca3e6bf29917fe51b7849342531fa2a6808d782f1e06'
'55ba879c015df11582ff4afaa9ccca19c0e3d3a8be503629718402dbdc826e36bc3ec3ce4dd589705371d7fcf250ba2a9b30c5a3dd0cfccb8e008346f1bd6252')
prepare() {
  cd onnxruntime
  # Apply downstream build fixes before initializing submodules.
  patch -Np1 -i ../build-fixes.patch
  patch -Np1 -i ../clang.patch
  # Redirect each vendored submodule to the clone makepkg fetched into
  # $srcdir, so no network access is needed during the submodule update.
  git submodule init
  for submodule in eigen onnx SafeInt optional-lite tensorboard dlpack cxxopts; do
    git config "submodule.cmake/external/${submodule}.url" "$srcdir/$submodule"
    git submodule update "cmake/external/$submodule"
  done
}
# Configure and build onnxruntime into the directory named by $1.
# Any remaining arguments are forwarded verbatim to cmake (build() uses
# this to enable CUDA for the second build tree).
_build() {
  # 'local' keeps the helper from leaking build_dir into the global scope;
  # quote it everywhere so an unexpected space cannot split the path.
  local build_dir=$1
  shift
  cd "$srcdir"/onnxruntime
  # Use protobuf-lite instead of full protobuf to workaround symbol conflicts
  # with onnx; see https://github.com/onnx/onnx/issues/1277 for details.
  CC=/usr/bin/clang CXX=/usr/bin/clang++ \
  cmake -B "$build_dir" -S cmake \
    -DCMAKE_INSTALL_PREFIX=/usr \
    -Donnxruntime_ENABLE_PYTHON=ON \
    -DONNX_CUSTOM_PROTOC_EXECUTABLE=/usr/bin/protoc \
    -Donnxruntime_PREFER_SYSTEM_LIB=ON \
    -Donnxruntime_USE_FULL_PROTOBUF=OFF \
    -Donnxruntime_BUILD_SHARED_LIB=ON \
    -Donnxruntime_ENABLE_TRAINING=ON \
    -Donnxruntime_USE_MPI=ON \
    "$@"
  cd "$build_dir"
  make
  # Build the Python wheel sources against the freshly built native libs.
  python ../setup.py build
}
build() {
  # First tree: plain CPU-only build.
  _build build
  # Second tree: CUDA-enabled build. Clang is the host compiler because GCC
  # does not work here: GCC 11 defaults to C++17 while onnxruntime targets
  # C++14, nvcc fails to forward -std=c++14 to the host compiler, so cicc
  # chokes on C++17 constructs in the preprocessed output. GCC 10 is no
  # better, since dependencies such as re2 are built against libstdc++ 11
  # and linking onnxruntime with libstdc++ 10 fails.
  local cuda_opts=(
    -DCMAKE_CUDA_HOST_COMPILER=/usr/bin/clang
    -Donnxruntime_USE_CUDA=ON
    -Donnxruntime_CUDA_HOME=/opt/cuda
    -Donnxruntime_CUDNN_HOME=/usr
    -Donnxruntime_USE_NCCL=ON
  )
  _build build-cuda "${cuda_opts[@]}"
}
# Run the native test suite plus pytest in the current build directory.
_check() {
  make test
  # launch_test.py appears to be a helper script rather than a test module,
  # and the orttraining_* BERT tests require the 'transformers' package and
  # fail even when it is installed — skip all three.
  local skipped=(
    --ignore launch_test.py
    --ignore orttraining_run_bert_pretrain.py
    --ignore orttraining_run_frontend_batch_size_test.py
  )
  LD_LIBRARY_PATH="$PWD" pytest "${skipped[@]}"
}
check() {
  # CPU build: run the full suite.
  cd "$srcdir/onnxruntime/build"
  _check
  # CUDA build: tests need a CUDA-capable GPU, which build hosts lack,
  # so only verify that the directory exists by entering it.
  cd "$srcdir/onnxruntime/build-cuda"
  # _check # requires machines with CUDA-compatible devices
}
# Install the build tree selected by the calling package_*() into $pkgdir.
# Must be invoked from inside a build directory (build/ or build-cuda/).
_package() {
  make install DESTDIR="$pkgdir"
  python ../setup.py install --root="$pkgdir" --skip-build --optimize=1
  # 'local' keeps these helper variables out of the global scope shared by
  # the package_*() functions.
  local PY_ORT_DIR f
  PY_ORT_DIR="$(python -c 'import site; print(site.getsitepackages()[0])')/onnxruntime"
  # -d creates the directory; the former extra -D flag is only meaningful
  # for file installs and was a no-op here.
  install -dm755 "$pkgdir"/usr/share/licenses/$pkgname
  # Symlink the license files shipped inside the Python package rather than
  # installing a second copy.
  for f in LICENSE ThirdPartyNotices.txt ; do
    ln -s "$PY_ORT_DIR/$f" "$pkgdir/usr/share/licenses/$pkgname/$f"
  done
  # already installed by `make install`, and not useful as this path is not looked up by the linker
  rm -vf "$pkgdir/$PY_ORT_DIR"/capi/libonnxruntime_providers_*
}
# CPU-only package: install from the plain build tree.
package_python-onnxruntime() {
  # Anchor the path at $srcdir (as check() does) instead of relying on the
  # caller's working directory.
  cd "$srcdir"/onnxruntime/build
  _package
}
# CUDA package: same contents as python-onnxruntime, built against
# CUDA/cuDNN/NCCL, and a drop-in replacement for the CPU package.
package_python-onnxruntime-cuda() {
  depends+=(cuda cudnn nccl)
  conflicts=(python-onnxruntime)
  provides=("python-onnxruntime=$pkgver")
  # Anchor the path at $srcdir (as check() does) instead of relying on the
  # caller's working directory.
  cd "$srcdir"/onnxruntime/build-cuda
  _package
}