diff --git a/examples/end2end_tfkeras.py b/examples/end2end_tfkeras.py
index 19da4d3..1150f1b 100644
--- a/examples/end2end_tfkeras.py
+++ b/examples/end2end_tfkeras.py
@@ -56,7 +56,7 @@ print(proc.stderr.decode('ascii'))
 
 ########################################
 # Runs onnxruntime.
-session = InferenceSession("simple_rnn.onnx")
+session = InferenceSession("simple_rnn.onnx", providers=['CPUExecutionProvider'])
 got = session.run(None, {'input_1': input})
 print(got[0])
 
diff --git a/examples/getting_started.py b/examples/getting_started.py
index d00ea03..25bc3bc 100644
--- a/examples/getting_started.py
+++ b/examples/getting_started.py
@@ -27,7 +27,7 @@ print("Tensorflow result")
 print(f(a_val, b_val).numpy())
 
 print("ORT result")
-sess = ort.InferenceSession(onnx_model.SerializeToString())
+sess = ort.InferenceSession(onnx_model.SerializeToString(), providers=['CPUExecutionProvider'])
 res = sess.run(None, {'a': a_val, 'b': b_val})
 print(res[0])
@@ -46,7 +46,7 @@ print("Keras result")
 print(model(x_val).numpy())
 
 print("ORT result")
-sess = ort.InferenceSession(onnx_model.SerializeToString())
+sess = ort.InferenceSession(onnx_model.SerializeToString(), providers=['CPUExecutionProvider'])
 res = sess.run(None, {'x': x_val})
 print(res[0])
@@ -57,7 +57,7 @@ model.save("savedmodel")
 os.system("python -m tf2onnx.convert --saved-model savedmodel --output model.onnx --opset 13")
 
 print("ORT result")
-sess = ort.InferenceSession("model.onnx")
+sess = ort.InferenceSession("model.onnx", providers=['CPUExecutionProvider'])
 res = sess.run(None, {'dense_input': x_val})
 print(res[0])
diff --git a/tests/test_einsum_helper.py b/tests/test_einsum_helper.py
index 9ecb5c4..05c9fe3 100644
--- a/tests/test_einsum_helper.py
+++ b/tests/test_einsum_helper.py
@@ -27,7 +27,7 @@ class TestEinsum(Tf2OnnxBackendTestBase):
     def apply_einsum_sequence(self, seq, *inputs):
         names = ["X%d" % i for i in range(len(inputs))]
         onx = seq.to_onnx('Y', *names, opset=self.config.opset)
-        sess = InferenceSession(onx.SerializeToString())
+        sess = InferenceSession(onx.SerializeToString(), providers=['CPUExecutionProvider'])
         inps = {n: i.astype(np.float32) for n, i in zip(names, inputs)}
         res = sess.run(None, inps)
         return res[0]
diff --git a/tests/test_einsum_optimizers.py b/tests/test_einsum_optimizers.py
index bd90131..55d1807 100644
--- a/tests/test_einsum_optimizers.py
+++ b/tests/test_einsum_optimizers.py
@@ -94,8 +94,8 @@ class EinsumOptimizerTests(Tf2OnnxBackendTestBase):
         new_model_proto = self.run_einsum_compare(["Y"], feed_dict, model_proto,
                                                   catch_errors=catch_errors)
 
-        sess1 = InferenceSession(model_proto.SerializeToString())
-        sess2 = InferenceSession(new_model_proto.SerializeToString())
+        sess1 = InferenceSession(model_proto.SerializeToString(), providers=['CPUExecutionProvider'])
+        sess2 = InferenceSession(new_model_proto.SerializeToString(), providers=['CPUExecutionProvider'])
         got1 = sess1.run(None, feed_dict)
         got2 = sess2.run(None, feed_dict)
         assert_almost_equal(got1, got2)
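
Every hunk above applies the same one-line fix: passing an explicit providers list to InferenceSession. Since onnxruntime 1.9, builds that ship more than one execution provider refuse to construct a session without one, and pinning to CPUExecutionProvider keeps the examples and tests deterministic. The following is a minimal self-contained sketch of the pattern; the Identity model built here is purely illustrative and is not part of this patch.

# Minimal sketch of the providers pattern applied throughout this diff.
import numpy as np
import onnxruntime as ort
from onnx import TensorProto, helper

# Build a trivial Identity model so the snippet needs no external files.
node = helper.make_node('Identity', ['x'], ['y'])
graph = helper.make_graph(
    [node], 'g',
    [helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])],
    [helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])

# Explicit providers list, as required by onnxruntime >= 1.9 on
# multi-provider builds; CPUExecutionProvider pins inference to CPU.
sess = ort.InferenceSession(model.SerializeToString(),
                            providers=['CPUExecutionProvider'])
print(sess.run(None, {'x': np.array([1.0], dtype=np.float32)})[0])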