diff options
author | Chih-Hsuan Yen | 2022-07-23 20:11:18 +0800 |
---|---|---|
committer | Chih-Hsuan Yen | 2022-07-23 20:11:18 +0800 |
commit | c6eddeba9fdb8a5f5e18ec929b1bb3bfc92b06bd (patch) | |
tree | d8a6ed71cbb7e9af603d811917308a5d13451306 | |
parent | 8d347bed37e512e7a9bdfbeea35d5cb47fcfb4a9 (diff) | |
download | aur-c6eddeba9fdb8a5f5e18ec929b1bb3bfc92b06bd.tar.gz |
update to 1.12.0
* Update Python dependencies following upstream [1].
* Rebase the de-vendoring patches after upstream changes [2].
* Avoid importing from wheel.vendored, which upstream requires since [3]
  but which is de-vendored in Arch's wheel package [4].
[1] https://github.com/microsoft/onnxruntime/pull/11522
[2] https://github.com/microsoft/onnxruntime/pull/11146
[3] https://github.com/microsoft/onnxruntime/pull/11834
[4] https://github.com/archlinux/svntogit-community/commit/e691288eda92fb5982ac5ac18f6459c5da560d7a
-rw-r--r-- | .SRCINFO | 20 | ||||
-rw-r--r-- | PKGBUILD | 12 | ||||
-rw-r--r-- | build-fixes.patch | 46 | ||||
-rw-r--r-- | install-orttraining-files.diff | 16 |
4 files changed, 50 insertions, 44 deletions
@@ -1,6 +1,6 @@ pkgbase = python-onnxruntime pkgdesc = Cross-platform, high performance scoring engine for ML models - pkgver = 1.11.1 + pkgver = 1.12.0 pkgrel = 1 url = https://github.com/microsoft/onnxruntime arch = x86_64 @@ -20,26 +20,26 @@ pkgbase = python-onnxruntime makedepends = gcc11 depends = nsync depends = re2 - depends = python-flatbuffers - depends = python-numpy - depends = python-protobuf depends = openmpi depends = onednn depends = libprotobuf-lite.so + depends = python-coloredlogs + depends = python-flatbuffers + depends = python-numpy + depends = python-packaging + depends = python-protobuf + depends = python-sympy optdepends = python-onnx: for the backend API, quantization, orttraining, transformers and various tools - optdepends = python-coloredlogs: for transformers optdepends = python-psutil: for transformers optdepends = python-py-cpuinfo: for transformers optdepends = python-py3nvml: for transformers - optdepends = python-packaging: for transformers and various tools optdepends = python-transformers: for transformers optdepends = python-scipy: for transformers and various tools optdepends = python-pytorch: for transformers, orttraining and various tools optdepends = python-cerberus: for orttraining optdepends = python-h5py: for orttraining - optdepends = python-sympy: for transformers and various tools options = !lto - source = git+https://github.com/microsoft/onnxruntime#tag=v1.11.1 + source = git+https://github.com/microsoft/onnxruntime#tag=v1.12.0 source = git+https://github.com/onnx/onnx.git source = git+https://github.com/dcleblanc/SafeInt.git source = git+https://github.com/tensorflow/tensorboard.git @@ -56,8 +56,8 @@ pkgbase = python-onnxruntime sha512sums = SKIP sha512sums = SKIP sha512sums = SKIP - sha512sums = 80ea85ea20bbbdec7991f965a66b627a5f42828bc0c72be0913078d927833a82402fb1af6c5c9f6ecae861b45582fa42c98ce83b02768e4bf875ab89dd1c607c - sha512sums = 
06a002361cc324184d0bfcb520b472f57749c0537329f0e0dee833cc7fce2f08b14590b77bc0211422dfb933dbef6f249f19939f9e0df465c48ee8fc7827e31c + sha512sums = ab0446ede08e528ca631a73e536ff42009ee8f152972d37050b2f9b44b3d1c06d19bd8a91c31b09c26f5db1482a699b8fe2c221b78199199dfa245728856b196 + sha512sums = 7d55b0d4232183a81c20a5049f259872150536eed799d81a15e7f10b5c8b5279b443ba96d7b97c0e4338e95fc18c9d6f088e348fc7002256ee7170d25b27d80d sha512sums = 6735c7aca2ba2f1f2a5286eb064125bf7f2c68a575d572dd157769d15778ff3e717b3a53d696c767748229f23ee6c3a7c82679df1d86283d7c4dd0ec9103ae08 pkgname = python-onnxruntime @@ -6,28 +6,26 @@ pkgbase=python-onnxruntime # Not split DNNL EP to another package as it's needed unconditionally at runtime if built at compile time # https://github.com/microsoft/onnxruntime/blob/v1.9.1/onnxruntime/python/onnxruntime_pybind_state.cc#L533 pkgname=(python-onnxruntime) -pkgver=1.11.1 +pkgver=1.12.0 pkgdesc='Cross-platform, high performance scoring engine for ML models' pkgrel=1 arch=(x86_64) url='https://github.com/microsoft/onnxruntime' license=(MIT) -depends=(nsync re2 python-flatbuffers python-numpy python-protobuf openmpi onednn libprotobuf-lite.so) +depends=(nsync re2 openmpi onednn libprotobuf-lite.so + python-coloredlogs python-flatbuffers python-numpy python-packaging python-protobuf python-sympy) makedepends=(git cmake pybind11 python-setuptools nlohmann-json chrono-date boost eigen flatbuffers) optdepends=( # https://github.com/microsoft/onnxruntime/pull/9969 'python-onnx: for the backend API, quantization, orttraining, transformers and various tools' - 'python-coloredlogs: for transformers' # also used by TensorRT tools, but we don't build for it, anyway 'python-psutil: for transformers' 'python-py-cpuinfo: for transformers' 'python-py3nvml: for transformers' - 'python-packaging: for transformers and various tools' 'python-transformers: for transformers' 'python-scipy: for transformers and various tools' 'python-pytorch: for transformers, orttraining and 
various tools' 'python-cerberus: for orttraining' 'python-h5py: for orttraining' - 'python-sympy: for transformers and various tools' ) # not de-vendored libraries # onnx: needs shared libonnx (https://github.com/onnx/onnx/issues/3030) @@ -48,8 +46,8 @@ sha512sums=('SKIP' 'SKIP' 'SKIP' 'SKIP' - '80ea85ea20bbbdec7991f965a66b627a5f42828bc0c72be0913078d927833a82402fb1af6c5c9f6ecae861b45582fa42c98ce83b02768e4bf875ab89dd1c607c' - '06a002361cc324184d0bfcb520b472f57749c0537329f0e0dee833cc7fce2f08b14590b77bc0211422dfb933dbef6f249f19939f9e0df465c48ee8fc7827e31c' + 'ab0446ede08e528ca631a73e536ff42009ee8f152972d37050b2f9b44b3d1c06d19bd8a91c31b09c26f5db1482a699b8fe2c221b78199199dfa245728856b196' + '7d55b0d4232183a81c20a5049f259872150536eed799d81a15e7f10b5c8b5279b443ba96d7b97c0e4338e95fc18c9d6f088e348fc7002256ee7170d25b27d80d' '6735c7aca2ba2f1f2a5286eb064125bf7f2c68a575d572dd157769d15778ff3e717b3a53d696c767748229f23ee6c3a7c82679df1d86283d7c4dd0ec9103ae08') # CUDA seems not working with LTO options+=('!lto') diff --git a/build-fixes.patch b/build-fixes.patch index 25b410c9c005..614a39ce3b8f 100644 --- a/build-fixes.patch +++ b/build-fixes.patch @@ -2,23 +2,17 @@ diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt index a027c69e0..eb7608518 100644 --- a/cmake/CMakeLists.txt +++ b/cmake/CMakeLists.txt -@@ -841,7 +841,9 @@ add_library(safeint_interface INTERFACE) - target_include_directories(safeint_interface INTERFACE ${SAFEINT_INCLUDE_DIR}) +@@ -733,7 +733,7 @@ - if (onnxruntime_PREFER_SYSTEM_LIB) -- find_package(boost_mp11) -+ # boost on Arch does not support find_package(boost_mp11) -+ add_library(boost_mp11 INTERFACE) -+ add_library(Boost::mp11 ALIAS boost_mp11) + if (NOT WIN32) + if (onnxruntime_PREFER_SYSTEM_LIB) +- find_package(nsync) ++ find_package(nsync_cpp) + endif() + if (TARGET nsync_cpp) # linking error with nsync_FOUND (why?) 
+ message("Use nsync from preinstalled system lib") +@@ -764,9 +765,11 @@ if(onnxruntime_DISABLE_EXCEPTIONS) endif() - if (NOT TARGET Boost::mp11) - add_subdirectory(external/mp11 EXCLUDE_FROM_ALL) -@@ -764,10 +765,12 @@ if(onnxruntime_DISABLE_EXCEPTIONS) - - set(JSON_BuildTests OFF CACHE INTERNAL "") - set(JSON_Install OFF CACHE INTERNAL "") --add_subdirectory(external/json EXCLUDE_FROM_ALL) -+find_package(nlohmann_json REQUIRED) if (onnxruntime_PREFER_SYSTEM_LIB) - find_package(re2) @@ -26,14 +20,28 @@ index a027c69e0..eb7608518 100644 + pkg_check_modules(RE2 IMPORTED_TARGET re2) + add_library(re2::re2 ALIAS PkgConfig::RE2) endif() - if (NOT TARGET re2::re2) +-if (re2_FOUND) ++if (TARGET re2::re2) + message("Use re2 from preinstalled system lib") + else() add_subdirectory(external/re2 EXCLUDE_FROM_ALL) @@ -1421,7 +1421,7 @@ - find_package(Flatbuffers) endif() if (Flatbuffers_FOUND) + message("Use flatbuffers from preinstalled system lib") - add_library(flatbuffers ALIAS flatbuffers::flatbuffers) + add_library(flatbuffers ALIAS flatbuffers::flatbuffers_shared) else() - add_subdirectory(external/flatbuffers EXCLUDE_FROM_ALL) - endif() + message("Use flatbuffers from submodule") + # We do not need to build flatc for iOS or Android Cross Compile +--- a/setup.py 2022-07-22 17:00:19.638893453 +0800 ++++ b/setup.py 2022-07-22 17:02:00.686317628 +0800 +@@ -16,7 +16,7 @@ + + from setuptools import Extension, setup + from setuptools.command.install import install as InstallCommandBase +-from wheel.vendored.packaging.tags import sys_tags ++from packaging.tags import sys_tags + + nightly_build = False + package_name = "onnxruntime" diff --git a/install-orttraining-files.diff b/install-orttraining-files.diff index e95601fcd183..7df2871aa219 100644 --- a/install-orttraining-files.diff +++ b/install-orttraining-files.diff @@ -1,18 +1,18 @@ --- a/setup.py 2021-12-29 22:44:09.924917943 +0800 +++ b/setup.py 2021-12-29 22:49:16.216878004 +0800 @@ -355,7 +355,7 @@ - 'Operating 
System :: Microsoft :: Windows', - 'Operating System :: MacOS']) + if not enable_training: + classifiers.extend(["Operating System :: Microsoft :: Windows", "Operating System :: MacOS"]) -if enable_training: +if True: - packages.extend(['onnxruntime.training', - 'onnxruntime.training.amp', - 'onnxruntime.training.optim', + packages.extend( + [ + "onnxruntime.training", @@ -373,6 +373,7 @@ - package_data['onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.torch_gpu_allocator'] = ['*.cc'] - package_data['onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.fused_ops'] = \ - ['*.cpp', '*.cu', '*.cuh', '*.h'] + '*.cuh', + '*.h' + ] +if enable_training: requirements_file = "requirements-training.txt" # with training, we want to follow this naming convention: |