diff options
author | Chih-Hsuan Yen | 2022-06-13 18:44:25 +0800 |
---|---|---|
committer | Chih-Hsuan Yen | 2022-06-13 18:44:25 +0800 |
commit | e1d6bdcfbcad9e9aadb6a7e6ae2087729288052e (patch) | |
tree | 4b06ca85ca57118eaee162cd7c00922620ad42dc | |
parent | ce2dec9980f902e665bb0bb66ca85afcd9b1cea6 (diff) | |
download | aur-e1d6bdcfbcad9e9aadb6a7e6ae2087729288052e.tar.gz |
update to 1.11.1
Fix tests that failed after the default opset was upgraded from 9 to 13 [1].
Also switches from python-install to python-installer.
[1] https://github.com/onnx/tensorflow-onnx/pull/1946
-rw-r--r-- | .SRCINFO | 10 | ||||
-rw-r--r-- | PKGBUILD | 19 | ||||
-rw-r--r-- | onnxruntime.diff | 72 |
3 files changed, 91 insertions, 10 deletions
@@ -1,6 +1,6 @@ pkgbase = python-tf2onnx pkgdesc = Convert TensorFlow models to ONNX - pkgver = 1.10.1 + pkgver = 1.11.1 pkgrel = 1 url = https://github.com/onnx/tensorflow-onnx arch = any @@ -12,7 +12,7 @@ pkgbase = python-tf2onnx checkdepends = python-onnxruntime makedepends = python-setuptools makedepends = python-build - makedepends = python-install + makedepends = python-installer makedepends = python-wheel depends = python depends = python-tensorflow @@ -20,7 +20,9 @@ pkgbase = python-tf2onnx depends = python-onnx depends = python-requests depends = python-six - source = https://github.com/onnx/tensorflow-onnx/archive/v1.10.1/tf2onnx-v1.10.1.tar.gz - sha256sums = 60f342b8f5a67482a067b3fa7aa26110a806bba82ddd0cb188189e097de95835 + source = https://github.com/onnx/tensorflow-onnx/archive/v1.11.1/tf2onnx-v1.11.1.tar.gz + source = onnxruntime.diff + sha256sums = 3b41c22a1bd08f08521aab8b5a8f9351457dcd93b602b9e63fe811aaa9477b69 + sha256sums = 7e8ab46940aff7cac8d535436c8e201f37c66637416ae9b795706a4c3dd01232 pkgname = python-tf2onnx @@ -1,28 +1,35 @@ # Maintainer: Chih-Hsuan Yen <yan12125@archlinux.org> pkgname=python-tf2onnx -pkgver=1.10.1 +pkgver=1.11.1 pkgrel=1 pkgdesc='Convert TensorFlow models to ONNX' arch=(any) url='https://github.com/onnx/tensorflow-onnx' license=(MIT) depends=(python python-tensorflow python-numpy python-onnx python-requests python-six) -makedepends=(python-setuptools python-build python-install python-wheel) +makedepends=(python-setuptools python-build python-installer python-wheel) checkdepends=(python-pytest python-graphviz python-parameterized python-yaml python-onnxruntime) -source=("https://github.com/onnx/tensorflow-onnx/archive/v$pkgver/tf2onnx-v$pkgver.tar.gz") -sha256sums=('60f342b8f5a67482a067b3fa7aa26110a806bba82ddd0cb188189e097de95835') +source=("https://github.com/onnx/tensorflow-onnx/archive/v$pkgver/tf2onnx-v$pkgver.tar.gz" + "onnxruntime.diff") +sha256sums=('3b41c22a1bd08f08521aab8b5a8f9351457dcd93b602b9e63fe811aaa9477b69' + 
'7e8ab46940aff7cac8d535436c8e201f37c66637416ae9b795706a4c3dd01232') prepare() { cd tensorflow-onnx-$pkgver + sed -i -r 's#--cov\S+##' setup.cfg sed -i "s#'pytest-runner'##" setup.py + + # The latest upstream tag may not sync with the version file + echo $pkgver > VERSION_NUMBER + + patch -Np1 -i ../onnxruntime.diff } build() { cd tensorflow-onnx-$pkgver python -m build --wheel --no-isolation - python -m install --cache dist/*.whl } check() { @@ -32,6 +39,6 @@ check() { package() { cd tensorflow-onnx-$pkgver - python -m install --destdir="$pkgdir" --skip-build --verify-dependencies + python -m installer --destdir="$pkgdir" --compile-bytecode 0 --compile-bytecode 1 --compile-bytecode 2 dist/*.whl install -Dm644 LICENSE -t "$pkgdir"/usr/share/licenses/$pkgname } diff --git a/onnxruntime.diff b/onnxruntime.diff new file mode 100644 index 000000000000..3aca16422258 --- /dev/null +++ b/onnxruntime.diff @@ -0,0 +1,72 @@ +diff --git a/examples/end2end_tfkeras.py b/examples/end2end_tfkeras.py +index 19da4d3..1150f1b 100644 +--- a/examples/end2end_tfkeras.py ++++ b/examples/end2end_tfkeras.py +@@ -56,7 +56,7 @@ print(proc.stderr.decode('ascii')) + + ######################################## + # Runs onnxruntime. 
+-session = InferenceSession("simple_rnn.onnx") ++session = InferenceSession("simple_rnn.onnx", providers=['CPUExecutionProvider']) + got = session.run(None, {'input_1': input}) + print(got[0]) + +diff --git a/examples/getting_started.py b/examples/getting_started.py +index d00ea03..25bc3bc 100644 +--- a/examples/getting_started.py ++++ b/examples/getting_started.py +@@ -27,7 +27,7 @@ print("Tensorflow result") + print(f(a_val, b_val).numpy()) + + print("ORT result") +-sess = ort.InferenceSession(onnx_model.SerializeToString()) ++sess = ort.InferenceSession(onnx_model.SerializeToString(), providers=['CPUExecutionProvider']) + res = sess.run(None, {'a': a_val, 'b': b_val}) + print(res[0]) + +@@ -46,7 +46,7 @@ print("Keras result") + print(model(x_val).numpy()) + + print("ORT result") +-sess = ort.InferenceSession(onnx_model.SerializeToString()) ++sess = ort.InferenceSession(onnx_model.SerializeToString(), providers=['CPUExecutionProvider']) + res = sess.run(None, {'x': x_val}) + print(res[0]) + +@@ -57,7 +57,7 @@ model.save("savedmodel") + os.system("python -m tf2onnx.convert --saved-model savedmodel --output model.onnx --opset 13") + + print("ORT result") +-sess = ort.InferenceSession("model.onnx") ++sess = ort.InferenceSession("model.onnx", providers=['CPUExecutionProvider']) + res = sess.run(None, {'dense_input': x_val}) + print(res[0]) + +diff --git a/tests/test_einsum_helper.py b/tests/test_einsum_helper.py +index 9ecb5c4..05c9fe3 100644 +--- a/tests/test_einsum_helper.py ++++ b/tests/test_einsum_helper.py +@@ -27,7 +27,7 @@ class TestEinsum(Tf2OnnxBackendTestBase): + def apply_einsum_sequence(self, seq, *inputs):
+ names = ["X%d" % i for i in range(len(inputs))]
+ onx = seq.to_onnx('Y', *names, opset=self.config.opset)
+- sess = InferenceSession(onx.SerializeToString())
++ sess = InferenceSession(onx.SerializeToString(), providers=['CPUExecutionProvider'])
+ inps = {n: i.astype(np.float32) for n, i in zip(names, inputs)}
+ res = sess.run(None, inps)
+ return res[0]
+diff --git a/tests/test_einsum_optimizers.py b/tests/test_einsum_optimizers.py +index bd90131..55d1807 100644 +--- a/tests/test_einsum_optimizers.py ++++ b/tests/test_einsum_optimizers.py +@@ -94,8 +94,8 @@ class EinsumOptimizerTests(Tf2OnnxBackendTestBase): + new_model_proto = self.run_einsum_compare(["Y"], feed_dict, model_proto, + catch_errors=catch_errors) + +- sess1 = InferenceSession(model_proto.SerializeToString()) +- sess2 = InferenceSession(new_model_proto.SerializeToString()) ++ sess1 = InferenceSession(model_proto.SerializeToString(), providers=['CPUExecutionProvider']) ++ sess2 = InferenceSession(new_model_proto.SerializeToString(), providers=['CPUExecutionProvider']) + got1 = sess1.run(None, feed_dict) + got2 = sess2.run(None, feed_dict) + assert_almost_equal(got1, got2) |