cgit commit-diff view for the tensorflow-computecpp AUR package
(navigation links — summary / log / tree / commit / diff / stats — and the
diff-option controls — context, space, mode — were flattened by the page scrape;
the unified diff below is the substantive content).
-rw-r--r--.SRCINFO22
-rw-r--r--PKGBUILD53
-rw-r--r--gcc1.diff81
-rw-r--r--gcc2.diff390
-rw-r--r--gcc3.diff31
-rw-r--r--py37.diff220
-rw-r--r--python37.patch541
7 files changed, 1325 insertions, 13 deletions
diff --git a/.SRCINFO b/.SRCINFO
index f358fce55cec..995677b4c985 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,7 +1,7 @@
pkgbase = tensorflow-computecpp
pkgdesc = Library for computation using data flow graphs for scalable machine learning (backend with ComputeCpp)
pkgver = 1.9
- pkgrel = 3
+ pkgrel = 5
epoch = 1
url = https://github.com/codeplaysoftware/tensorflow
arch = x86_64
@@ -9,16 +9,30 @@ pkgbase = tensorflow-computecpp
makedepends = git
makedepends = opencl-icd-loader
makedepends = computecpp
- makedepends = bazel
+ makedepends = cmake
+ makedepends = opencl-headers
makedepends = python-numpy
makedepends = python-pip
makedepends = python-wheel
makedepends = python-setuptools
+ makedepends = java-environment=8
depends = opencl-icd-loader
depends = computecpp
options = !ccache
- source = git+https://github.com/codeplaysoftware/tensorflow#tag=computecpp_1.0.1
- sha512sums = SKIP
+ source = git+https://github.com/codeplaysoftware/tensorflow
+ source = https://archive.org/download/archlinux_pkg_bazel/bazel-0.17.2-1-x86_64.pkg.tar.xz
+ source = python37.patch
+ source = py37.diff
+ source = gcc1.diff
+ source = gcc2.diff
+ source = gcc3.diff
+ sha256sums = SKIP
+ sha256sums = 758e10caff4c1cb496d1cf49d6f4da2969b610b174276fb734b8502686d07ddd
+ sha256sums = ef54b3783a05b5604cd8f448136567686806ad3a5759978f48549256807a8394
+ sha256sums = b3997091bc7a32f9e8c062a88e9148273090ebf66aeebb5dc055baa41b7aae7e
+ sha256sums = 7d9f32a46cac83ec1a7308ac380226cdf40f98830c869bcdf5feb7bf110abf9a
+ sha256sums = 10de738141852cfebae9847b746ae9b58f3b3985561cccede929d8fbdba93551
+ sha256sums = 742abe5d8bfd3f7ce33778a08cbb233337db56238d11ac2ad07171b0d6097bfb
pkgname = tensorflow-computecpp
provides = tensorflow
diff --git a/PKGBUILD b/PKGBUILD
index f8668f0bbfa6..0d715683455a 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -3,23 +3,36 @@
pkgbase=tensorflow-computecpp
pkgname=(tensorflow-computecpp python-tensorflow-computecpp)
pkgver=1.9
-pkgrel=3
+pkgrel=5
pkgdesc="Library for computation using data flow graphs for scalable machine learning (backend with ComputeCpp)"
url="https://github.com/codeplaysoftware/tensorflow"
epoch=1
license=('APACHE')
arch=('x86_64')
depends=(opencl-icd-loader computecpp)
-makedepends=(git opencl-icd-loader computecpp bazel python-numpy python-pip python-wheel python-setuptools)
+makedepends=(git opencl-icd-loader computecpp cmake opencl-headers
+ python-numpy python-pip python-wheel python-setuptools java-environment=8)
options=(!ccache)
-source=("git+${url}#tag=computecpp_1.0.1")
-sha512sums=('SKIP')
+source=("git+${url}"
+ #"https://github.com/bazelbuild/bazel/releases/download/0.17.2/bazel-0.17.2-dist.zip"
+ "https://archive.org/download/archlinux_pkg_bazel/bazel-0.17.2-1-x86_64.pkg.tar.xz"
+ python37.patch
+ py37.diff
+ gcc1.diff
+ gcc2.diff
+ gcc3.diff)
+sha256sums=('SKIP'
+ '758e10caff4c1cb496d1cf49d6f4da2969b610b174276fb734b8502686d07ddd'
+ 'ef54b3783a05b5604cd8f448136567686806ad3a5759978f48549256807a8394'
+ 'b3997091bc7a32f9e8c062a88e9148273090ebf66aeebb5dc055baa41b7aae7e'
+ '7d9f32a46cac83ec1a7308ac380226cdf40f98830c869bcdf5feb7bf110abf9a'
+ '10de738141852cfebae9847b746ae9b58f3b3985561cccede929d8fbdba93551'
+ '742abe5d8bfd3f7ce33778a08cbb233337db56238d11ac2ad07171b0d6097bfb')
prepare() {
# These environment variables influence the behavior of the configure call below.
export PYTHON_BIN_PATH=/usr/bin/python
export USE_DEFAULT_PYTHON_LIB_PATH=1
- export CC_OPT_FLAGS="-march=native -mfpmath=sse -O2 -pipe"
export TF_DOWNLOAD_CLANG=0
export TF_CUDA_CLANG=0
export TF_NEED_CUDA=0
@@ -39,18 +52,40 @@ prepare() {
export TF_NEED_COMPUTECPP=1
export COMPUTECPP_TOOLKIT_PATH=/opt/ComputeCpp-CE
export COMPUTE=:0
+# Device-sensitive magic, these should work on as much hardware as possible
+ export TF_USE_HALF_SYCL=0
+ export TF_USE_DOUBLE_SYCL=0
+ export TF_SYCL_BITCODE_TARGET=spir64
+ export CC_OPT_FLAGS="-march=native -mfpmath=sse -O2 -pipe" # -sycl-compress-name for some amd cards
# make sure the proxy variables are in all caps, otherwise bazel ignores them
export HTTP_PROXY=`echo $http_proxy | sed -e 's/\/$//'`
export HTTPS_PROXY=`echo $https_proxy | sed -e 's/\/$//'`
+
+ cd ${srcdir}/tensorflow
+ git apply --index ../python37.patch
+ git apply --index --whitespace=nowarn ../py37.diff
+ git apply --index --whitespace=nowarn ../gcc1.diff
+ git apply --index --whitespace=nowarn ../gcc2.diff
+ git apply --index --whitespace=nowarn ../gcc3.diff
}
build() {
- # _bazel_09_fix="--incompatible_load_argument_is_label=false"
+ # Build bazel
+ echo "Please note: currently, bazel version <0.18 is required to build this package."
+ echo "Fixing that for you" # "Building it temporarily..."
+ cd "$srcdir"
+ # ./compile.sh
+ export PATH=`pwd`/usr/bin:$PATH
+
cd ${srcdir}/tensorflow
+ if [ ! -f .bazelrc ]; then # configure should be in prepare, but bazel has to be built first atm
+ ./configure
+ fi
- ./configure
- bazel build --config=opt --config=sycl //tensorflow:libtensorflow.so //tensorflow/tools/pip_package:build_pip_package # ${_bazel_09_fix}
+# Please take notice this requires at least 7GB of swap/disk space and 0.8+(3.2*threads)GB of RAM to build
+ bazel build -c opt --config=sycl //tensorflow:libtensorflow.so \
+ //tensorflow/tools/pip_package:build_pip_package # --jobs 1 --verbose_failures
bazel-bin/tensorflow/tools/pip_package/build_pip_package ${srcdir}/tmp
}
@@ -77,7 +112,7 @@ package_python-tensorflow-computecpp() {
cd ${srcdir}/tensorflow
WHEEL_PACKAGE=$(find ${srcdir}/tmp -name "tensor*.whl")
- pip install --ignore-installed --upgrade --root $pkgdir/ $WHEEL_PACKAGE --no-dependencies
+ pip install --ignore-installed --upgrade --root $pkgdir/ $WHEEL_PACKAGE --no-dependencies --no-warn-script-location
# tensorboard has been separated from upstream but they still install it with
# tensorflow. I don't know what kind of sense that makes but we have to clean
diff --git a/gcc1.diff b/gcc1.diff
new file mode 100644
index 000000000000..3c06a0103b19
--- /dev/null
+++ b/gcc1.diff
@@ -0,0 +1,81 @@
+diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
+index 44a95d6bad..0fb9e9fd29 100644
+--- a/tensorflow/workspace.bzl
++++ b/tensorflow/workspace.bzl
+@@ -461,6 +461,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
+ ],
+ sha256 = "895b31310e718a61f7335759a778c068a6edde1c089883598a0830cbb7075673",
+ strip_prefix = "grpc-d184fa229d75d336aedea0041bd59cb93e7e267f",
++ patch_file = clean_dep("//third_party/grpc:gcc9.patch"),
+ )
+
+
+diff --git a/third_party/grpc/gcc9.patch b/third_party/grpc/gcc9.patch
+new file mode 100644
+index 0000000000..9a3d737ab0
+--- /dev/null
++++ b/third_party/grpc/gcc9.patch
+@@ -0,0 +1,63 @@
++diff -ruN a/src/core/lib/gpr/log_linux.cc b/src/core/lib/gpr/log_linux.cc
++--- a/src/core/lib/gpr/log_linux.cc 2018-04-13 17:28:21.000000000 +0200
+++++ b/src/core/lib/gpr/log_linux.cc 2019-10-23 21:32:10.956028386 +0200
++@@ -40,7 +40,7 @@
++ #include <time.h>
++ #include <unistd.h>
++
++-static long gettid(void) { return syscall(__NR_gettid); }
+++static long sys_gettid(void) { return syscall(__NR_gettid); }
++
++ void gpr_log(const char* file, int line, gpr_log_severity severity,
++ const char* format, ...) {
++@@ -70,7 +70,7 @@
++ gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
++ struct tm tm;
++ static __thread long tid = 0;
++- if (tid == 0) tid = gettid();
+++ if (tid == 0) tid = sys_gettid();
++
++ timer = static_cast<time_t>(now.tv_sec);
++ final_slash = strrchr(args->file, '/');
++diff -ruN a/src/core/lib/gpr/log_posix.cc b/src/core/lib/gpr/log_posix.cc
++--- a/src/core/lib/gpr/log_posix.cc 2018-04-13 17:28:21.000000000 +0200
+++++ b/src/core/lib/gpr/log_posix.cc 2019-10-23 21:30:10.491136918 +0200
++@@ -30,7 +30,7 @@
++ #include <string.h>
++ #include <time.h>
++
++-static intptr_t gettid(void) { return (intptr_t)pthread_self(); }
+++static intptr_t sys_gettid(void) { return (intptr_t)pthread_self(); }
++
++ void gpr_log(const char* file, int line, gpr_log_severity severity,
++ const char* format, ...) {
++@@ -85,7 +85,7 @@
++ char* prefix;
++ gpr_asprintf(&prefix, "%s%s.%09d %7tu %s:%d]",
++ gpr_log_severity_string(args->severity), time_buffer,
++- (int)(now.tv_nsec), gettid(), display_file, args->line);
+++ (int)(now.tv_nsec), sys_gettid(), display_file, args->line);
++
++ fprintf(stderr, "%-70s %s\n", prefix, args->message);
++ gpr_free(prefix);
++diff -ruN a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
++--- a/src/core/lib/iomgr/ev_epollex_linux.cc 2018-04-13 17:28:21.000000000 +0200
+++++ b/src/core/lib/iomgr/ev_epollex_linux.cc 2019-10-23 21:31:24.020053353 +0200
++@@ -986,7 +986,7 @@
++ }
++
++ #ifndef NDEBUG
++-static long gettid(void) { return syscall(__NR_gettid); }
+++static long sys_gettid(void) { return syscall(__NR_gettid); }
++ #endif
++
++ /* pollset->mu lock must be held by the caller before calling this.
++@@ -1006,7 +1006,7 @@
++ #define WORKER_PTR (&worker)
++ #endif
++ #ifndef NDEBUG
++- WORKER_PTR->originator = gettid();
+++ WORKER_PTR->originator = sys_gettid();
++ #endif
++ if (grpc_polling_trace.enabled()) {
++ gpr_log(GPR_DEBUG,
diff --git a/gcc2.diff b/gcc2.diff
new file mode 100644
index 000000000000..e322d6ef8acd
--- /dev/null
+++ b/gcc2.diff
@@ -0,0 +1,390 @@
+diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
+index 0fb9e9fd29..eb5b5796c4 100644
+--- a/tensorflow/workspace.bzl
++++ b/tensorflow/workspace.bzl
+@@ -397,6 +397,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
+ ],
+ sha256 = "202cd7ddf8473343c1fccc735b2b7c0059f070f9584ae06daa3b11bb0c04778f",
+ strip_prefix = "nsync-912045960b78dcbbdbe1bc677c5f8f9940f142cc",
++ patch_file = clean_dep("//third_party:gcc9.patch"),
+ )
+
+ tf_http_archive(
+diff --git a/third_party/gcc9.patch b/third_party/gcc9.patch
+new file mode 100644
+index 0000000000..9dba157b79
+--- /dev/null
++++ b/third_party/gcc9.patch
+@@ -0,0 +1,372 @@
++From 436617053d0f39a1019a371c3a9aa599b3cb2cea Mon Sep 17 00:00:00 2001
++From: Mike Burrows <m3b@google.com>
++Date: Tue, 3 Sep 2019 16:28:02 -0700
++Subject: [PATCH] Suppress compiler warnings on newer compilers that have
++ additional checks.
++
++GCC 8.0 in C++ mode introduced a warning if memset() is used to zero a struct,
++even if it POD. This causes the compiler to be chatty if nsync is build using g++.
++Most of the changes are to cast the pointer argument to memset() to "void *" to avoid
++the warning.
++
++GCC 8.0 in C++ mode also started warning if a C-style cast changed the
++"constness" of the data. I'd accidentally done this in testing/testing.c; now fixed.
++
++NetBSD has started using C99-style designated initializers in the standard pthread
++initializer constants even if using C89; clang 8.0 warns about this. This
++change turns the warning off in the netbsd/clang Makefiles.
++---
++ builds/x86_32.netbsd.clang.atm-c11/Makefile | 2 +-
++ builds/x86_32.netbsd.clang/Makefile | 2 +-
++ internal/counter.c | 2 +-
++ internal/cv.c | 2 +-
++ internal/mu.c | 2 +-
++ internal/note.c | 2 +-
++ testing/cv_mu_timeout_stress_test.c | 6 +++---
++ testing/cv_test.c | 6 +++---
++ testing/cv_wait_example_test.c | 2 +-
++ testing/mu_starvation_test.c | 2 +-
++ testing/mu_test.c | 16 ++++++++--------
++ testing/mu_wait_example_test.c | 2 +-
++ testing/mu_wait_test.c | 2 +-
++ testing/once_test.c | 2 +-
++ testing/pingpong_test.c | 2 +-
++ testing/testing.c | 6 +++---
++ testing/testing.h | 1 +
++ 17 files changed, 30 insertions(+), 29 deletions(-)
++
++diff --git a/builds/x86_32.netbsd.clang.atm-c11/Makefile b/builds/x86_32.netbsd.clang.atm-c11/Makefile
++index 980a8a6..4ea2b37 100644
++--- a/builds/x86_32.netbsd.clang.atm-c11/Makefile
+++++ b/builds/x86_32.netbsd.clang.atm-c11/Makefile
++@@ -1,6 +1,6 @@
++ CC=clang
++ PLATFORM_CPPFLAGS=-DNSYNC_ATOMIC_C11 -I../../platform/c11 -D_POSIX_C_SOURCE=200809L -I../../platform/clang -I../../platform/netbsd -I../../platform/x86_32 -I../../platform/posix -pthread
++-PLATFORM_CFLAGS=-Werror -Wall -Wextra -ansi -pedantic -Wno-unneeded-internal-declaration
+++PLATFORM_CFLAGS=-Werror -Wall -Wextra -ansi -pedantic -Wno-unneeded-internal-declaration -Wno-c99-extensions
++ PLATFORM_LDFLAGS=-pthread
++ MKDEP=${CC} -M
++ PLATFORM_C=../../platform/posix/src/nsync_semaphore_mutex.c ../../platform/posix/src/per_thread_waiter.c ../../platform/posix/src/yield.c ../../platform/posix/src/time_rep.c ../../platform/posix/src/nsync_panic.c
++diff --git a/builds/x86_32.netbsd.clang/Makefile b/builds/x86_32.netbsd.clang/Makefile
++index 46557fd..e761ed1 100644
++--- a/builds/x86_32.netbsd.clang/Makefile
+++++ b/builds/x86_32.netbsd.clang/Makefile
++@@ -1,6 +1,6 @@
++ CC=clang
++ PLATFORM_CPPFLAGS=-D_POSIX_C_SOURCE=200809L -I../../platform/clang -I../../platform/netbsd -I../../platform/x86_32 -I../../platform/posix -pthread
++-PLATFORM_CFLAGS=-Werror -Wall -Wextra -ansi -pedantic -Wno-unneeded-internal-declaration
+++PLATFORM_CFLAGS=-Werror -Wall -Wextra -ansi -pedantic -Wno-unneeded-internal-declaration -Wno-c99-extensions
++ PLATFORM_LDFLAGS=-pthread
++ MKDEP=${CC} -M
++ PLATFORM_C=../../platform/posix/src/nsync_semaphore_mutex.c ../../platform/posix/src/per_thread_waiter.c ../../platform/posix/src/yield.c ../../platform/posix/src/time_rep.c ../../platform/posix/src/nsync_panic.c
++diff --git a/internal/counter.c b/internal/counter.c
++index 59b3828..83af450 100644
++--- a/internal/counter.c
+++++ b/internal/counter.c
++@@ -36,7 +36,7 @@ struct nsync_counter_s_ {
++ nsync_counter nsync_counter_new (uint32_t value) {
++ nsync_counter c = (nsync_counter) malloc (sizeof (*c));
++ if (c != NULL) {
++- memset (c, 0, sizeof (*c));
+++ memset ((void *) c, 0, sizeof (*c));
++ ATM_STORE (&c->value, value);
++ }
++ return (c);
++diff --git a/internal/cv.c b/internal/cv.c
++index 4aeb5ff..1d63bb2 100644
++--- a/internal/cv.c
+++++ b/internal/cv.c
++@@ -27,7 +27,7 @@ NSYNC_CPP_START_
++
++ /* Initialize *cv. */
++ void nsync_cv_init (nsync_cv *cv) {
++- memset (cv, 0, sizeof (*cv));
+++ memset ((void *) cv, 0, sizeof (*cv));
++ }
++
++ /* Wake the cv waiters in the circular list pointed to by
++diff --git a/internal/mu.c b/internal/mu.c
++index 8dbe648..83d6595 100644
++--- a/internal/mu.c
+++++ b/internal/mu.c
++@@ -27,7 +27,7 @@ NSYNC_CPP_START_
++
++ /* Initialize *mu. */
++ void nsync_mu_init (nsync_mu *mu) {
++- memset (mu, 0, sizeof (*mu));
+++ memset ((void *) mu, 0, sizeof (*mu));
++ }
++
++ /* Release the mutex spinlock. */
++diff --git a/internal/note.c b/internal/note.c
++index 6181c09..6e526a4 100644
++--- a/internal/note.c
+++++ b/internal/note.c
++@@ -171,7 +171,7 @@ nsync_note nsync_note_new (nsync_note parent,
++ nsync_time abs_deadline) {
++ nsync_note n = (nsync_note) malloc (sizeof (*n));
++ if (n != NULL) {
++- memset (n, 0, sizeof (*n));
+++ memset ((void *) n, 0, sizeof (*n));
++ nsync_dll_init_ (&n->parent_child_link, n);
++ set_expiry_time (n, abs_deadline);
++ if (!nsync_note_is_notified (n) && parent != NULL) {
++diff --git a/testing/cv_mu_timeout_stress_test.c b/testing/cv_mu_timeout_stress_test.c
++index 934152e..64e5570 100644
++--- a/testing/cv_mu_timeout_stress_test.c
+++++ b/testing/cv_mu_timeout_stress_test.c
++@@ -494,7 +494,7 @@ static void test_cv_timeout_stress (testing t) {
++ nsync_time deadline;
++ deadline = nsync_time_add (nsync_time_now (), nsync_time_ms (5000));
++ do {
++- memset (&s, 0, sizeof (s));
+++ memset ((void *) &s, 0, sizeof (s));
++ s.loop_count = loop_count;
++ s.cv_threads_per_value = 4;
++ s.cv_reader_threads_per_value = 2;
++@@ -514,7 +514,7 @@ static void test_mu_timeout_stress (testing t) {
++ nsync_time deadline;
++ deadline = nsync_time_add (nsync_time_now (), nsync_time_ms (5000));
++ do {
++- memset (&s, 0, sizeof (s));
+++ memset ((void *) &s, 0, sizeof (s));
++ s.loop_count = loop_count;
++ s.cv_threads_per_value = 0;
++ s.cv_reader_threads_per_value = 0;
++@@ -534,7 +534,7 @@ static void test_mu_cv_timeout_stress (testing t) {
++ nsync_time deadline;
++ deadline = nsync_time_add (nsync_time_now (), nsync_time_ms (5000));
++ do {
++- memset (&s, 0, sizeof (s));
+++ memset ((void *) &s, 0, sizeof (s));
++ s.loop_count = loop_count;
++ s.cv_threads_per_value = 4;
++ s.cv_reader_threads_per_value = 1;
++diff --git a/testing/cv_test.c b/testing/cv_test.c
++index 22d3bb2..b0aa31a 100644
++--- a/testing/cv_test.c
+++++ b/testing/cv_test.c
++@@ -40,7 +40,7 @@ static cv_queue *cv_queue_new (int limit) {
++ cv_queue *q;
++ int size = offsetof (struct cv_queue_s, data) + sizeof (q->data[0]) * limit;
++ q = (cv_queue *) malloc (size);
++- memset (q, 0, size);
+++ memset ((void *) q, 0, size);
++ q->limit = limit;
++ return (q);
++ }
++@@ -470,7 +470,7 @@ static void test_cv_debug (testing t) {
++ int buflen;
++ struct debug_state xs;
++ struct debug_state *s = &xs;
++- memset (s, 0, sizeof (*s));
+++ memset ((void *) s, 0, sizeof (*s));
++
++ /* Use nsync_*_debugger to check that they work. */
++ tmp = nsync_mu_debugger (&s->mu);
++@@ -697,7 +697,7 @@ static void test_cv_transfer (testing t) {
++ TEST_LOG (t, ("transfer waiters %d wakeup_type %d cv_writers %d ccs_reader %d\n",
++ waiters, wakeup_type, cv_writers, ccs_reader));
++ }
++- memset (cvt, 0, sizeof (*cvt));
+++ memset ((void *) cvt, 0, sizeof (*cvt));
++
++ /* Start the waiter threads that use condition variables. */
++ for (i = 0; i < waiters-1; i++) {
++diff --git a/testing/cv_wait_example_test.c b/testing/cv_wait_example_test.c
++index 66a3d0b..ab34cb5 100644
++--- a/testing/cv_wait_example_test.c
+++++ b/testing/cv_wait_example_test.c
++@@ -146,7 +146,7 @@ static void example_cv_wait (testing t) {
++ "five\n"
++ "timeout 1s\n";
++
++- memset (&q, 0, sizeof (q));
+++ memset ((void *) &q, 0, sizeof (q));
++ memset (&output, 0, sizeof (output));
++
++ closure_fork (closure_add_and_wait_cv (&add_and_wait_cv, &q,
++diff --git a/testing/mu_starvation_test.c b/testing/mu_starvation_test.c
++index 7960a03..addfa0b 100644
++--- a/testing/mu_starvation_test.c
+++++ b/testing/mu_starvation_test.c
++@@ -36,7 +36,7 @@ typedef struct starve_data_s {
++
++ /* initialize *sd */
++ static void starve_data_init (starve_data *sd, int threads) {
++- memset (sd, 0, sizeof (*sd));
+++ memset ((void *) sd, 0, sizeof (*sd));
++ sd->not_yet_started = threads;
++ sd->not_yet_done = threads;
++ sd->start = nsync_time_now ();
++diff --git a/testing/mu_test.c b/testing/mu_test.c
++index a855cb8..f71d96d 100644
++--- a/testing/mu_test.c
+++++ b/testing/mu_test.c
++@@ -110,7 +110,7 @@ static void test_mu_nthread (testing t) {
++ do {
++ int i;
++ test_data td;
++- memset (&td, 0, sizeof (td));
+++ memset ((void *) &td, 0, sizeof (td));
++ td.t = t;
++ td.n_threads = 5;
++ td.loop_count = loop_count;
++@@ -149,7 +149,7 @@ static void test_mutex_nthread (testing t) {
++ do {
++ int i;
++ test_data td;
++- memset (&td, 0, sizeof (td));
+++ memset ((void *) &td, 0, sizeof (td));
++ td.t = t;
++ td.n_threads = 5;
++ td.loop_count = loop_count;
++@@ -190,7 +190,7 @@ static void test_rwmutex_nthread (testing t) {
++ do {
++ int i;
++ test_data td;
++- memset (&td, 0, sizeof (td));
+++ memset ((void *) &td, 0, sizeof (td));
++ td.t = t;
++ td.n_threads = 5;
++ td.loop_count = loop_count;
++@@ -243,7 +243,7 @@ static void test_try_mu_nthread (testing t) {
++ do {
++ int i;
++ test_data td;
++- memset (&td, 0, sizeof (td));
+++ memset ((void *) &td, 0, sizeof (td));
++ td.t = t;
++ td.n_threads = 5;
++ td.loop_count = loop_count;
++@@ -275,7 +275,7 @@ typedef struct counter_s {
++ /* Return a counter with initial value "initial". */
++ static counter *counter_new (int initial) {
++ counter *c = (counter *) malloc (sizeof (*c));
++- memset (c, 0, sizeof (*c));
+++ memset ((void *) c, 0, sizeof (*c));
++ c->value = initial;
++ return (c);
++ }
++@@ -1017,7 +1017,7 @@ static void contended_state_run_test (contended_state *cs, testing t,
++ nsync_mu locks, with small critical sections. */
++ static void benchmark_mu_contended (testing t) {
++ contended_state cs;
++- memset (&cs, 0, sizeof (cs));
+++ memset ((void *) &cs, 0, sizeof (cs));
++ contended_state_run_test (&cs, t, &cs.mu, (void (*) (void*))&nsync_mu_lock,
++ (void (*) (void*))&nsync_mu_unlock);
++ }
++@@ -1026,7 +1026,7 @@ static void benchmark_mu_contended (testing t) {
++ pthread_mutex_t locks, with small critical sections. */
++ static void benchmark_mutex_contended (testing t) {
++ contended_state cs;
++- memset (&cs, 0, sizeof (cs));
+++ memset ((void *) &cs, 0, sizeof (cs));
++ pthread_mutex_init (&cs.mutex, NULL);
++ contended_state_run_test (&cs, t, &cs.mutex, &void_pthread_mutex_lock,
++ &void_pthread_mutex_unlock);
++@@ -1037,7 +1037,7 @@ static void benchmark_mutex_contended (testing t) {
++ pthread_rwlock_t locks, with small critical sections. */
++ static void benchmark_wmutex_contended (testing t) {
++ contended_state cs;
++- memset (&cs, 0, sizeof (cs));
+++ memset ((void *) &cs, 0, sizeof (cs));
++ pthread_rwlock_init (&cs.rwmutex, NULL);
++ contended_state_run_test (&cs, t, &cs.rwmutex, &void_pthread_rwlock_wrlock,
++ &void_pthread_rwlock_unlock);
++diff --git a/testing/mu_wait_example_test.c b/testing/mu_wait_example_test.c
++index 110339b..053568d 100644
++--- a/testing/mu_wait_example_test.c
+++++ b/testing/mu_wait_example_test.c
++@@ -145,7 +145,7 @@ static void example_mu_wait (testing t) {
++ "five\n"
++ "timeout 1s\n";
++
++- memset (&q, 0, sizeof (q));
+++ memset ((void *) &q, 0, sizeof (q));
++ memset (&output, 0, sizeof (output));
++
++ closure_fork (closure_add_and_wait_mu (&add_and_wait_mu, &q, nsync_time_ms (500),
++diff --git a/testing/mu_wait_test.c b/testing/mu_wait_test.c
++index 5c8b2b4..58fce7a 100644
++--- a/testing/mu_wait_test.c
+++++ b/testing/mu_wait_test.c
++@@ -39,7 +39,7 @@ static mu_queue *mu_queue_new (int limit) {
++ mu_queue *q;
++ int size = offsetof (struct mu_queue_s, data) + sizeof (q->data[0]) * limit;
++ q = (mu_queue *) malloc (size);
++- memset (q, 0, size);
+++ memset ((void *) q, 0, size);
++ q->limit = limit;
++ return (q);
++ }
++diff --git a/testing/once_test.c b/testing/once_test.c
++index ec302f7..17ba823 100644
++--- a/testing/once_test.c
+++++ b/testing/once_test.c
++@@ -94,7 +94,7 @@ static void test_once_run (testing t) {
++ for (i = 0; i != 250; i++) {
++ struct once_test_s *s =
++ (struct once_test_s *) malloc (sizeof (*s));
++- memset (s, 0, sizeof (*s));
+++ memset ((void *) s, 0, sizeof (*s));
++ s->counter = 0;
++ s->done = nsync_counter_new (N);
++ s->t = t;
++diff --git a/testing/pingpong_test.c b/testing/pingpong_test.c
++index 7aa9520..442abdd 100644
++--- a/testing/pingpong_test.c
+++++ b/testing/pingpong_test.c
++@@ -44,7 +44,7 @@ typedef struct ping_pong_s {
++ } ping_pong;
++
++ static void ping_pong_init (ping_pong *pp, int limit) {
++- memset (pp, 0, sizeof (*pp));
+++ memset ((void *) pp, 0, sizeof (*pp));
++ pthread_mutex_init (&pp->mutex, NULL);
++ pthread_rwlock_init (&pp->rwmutex, NULL);
++ pthread_cond_init (&pp->cond[0], NULL);
++diff --git a/testing/testing.c b/testing/testing.c
++index 7d0ac81..ad89921 100644
++--- a/testing/testing.c
+++++ b/testing/testing.c
++@@ -137,7 +137,7 @@ testing_base testing_new (int argc, char *argv[], int flags) {
++ int i;
++ int argn;
++ testing_base tb = (testing_base)malloc (sizeof (*tb));
++- memset (tb, 0, sizeof (*tb));
+++ memset ((void *) tb, 0, sizeof (*tb));
++ tb->flags = flags;
++ tb->fp = stderr;
++ tb->argc = argc;
++@@ -298,7 +298,7 @@ CLOSURE_DECL_BODY1 (testing, testing)
++ /* Return whether there's a "spare thread"; that is, whether the current count
++ of child threads is less than the allowed parallelism. */
++ static int spare_thread (const void *v) {
++- const testing_base tb = (const testing_base) v;
+++ const_testing_base tb = (const_testing_base) v;
++ return (tb->child_count < tb->parallelism);
++ }
++
++@@ -348,7 +348,7 @@ void testing_run_ (testing_base tb, void (*f) (testing t), const char *name, int
++ (tb->include_pat == NULL || match (tb->include_pat, name)) &&
++ (tb->exclude_pat == NULL || !match (tb->exclude_pat, name))) {
++ testing t = (testing) malloc (sizeof (*t));
++- memset (t, 0, sizeof (*t));
+++ memset ((void *) t, 0, sizeof (*t));
++ nsync_dll_init_ (&t->siblings, t);
++ t->base = tb;
++ t->f = f;
++diff --git a/testing/testing.h b/testing/testing.h
++index b83d292..d78049e 100644
++--- a/testing/testing.h
+++++ b/testing/testing.h
++@@ -16,6 +16,7 @@
++ #define NSYNC_TESTING_TESTING_H_
++
++ typedef struct testing_base_s *testing_base;
+++typedef const struct testing_base_s *const_testing_base;
++ typedef struct testing_s *testing;
++
++ /* Return a newly initialized testing_base. */
diff --git a/gcc3.diff b/gcc3.diff
new file mode 100644
index 000000000000..d729f4d389b9
--- /dev/null
+++ b/gcc3.diff
@@ -0,0 +1,31 @@
+diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
+index c4f6bd5ebd..a95ba8070f 100644
+--- a/tensorflow/workspace.bzl
++++ b/tensorflow/workspace.bzl
+@@ -370,6 +370,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
+ urls = PROTOBUF_urls,
+ sha256 = PROTOBUF_sha256,
+ strip_prefix = PROTOBUF_strip_prefix,
++ patch_file = clean_dep("//third_party/protobuf:gcc9.patch"),
+ )
+
+ # We need to import the protobuf library under the names com_google_protobuf
+diff --git a/third_party/protobuf/gcc9.patch b/third_party/protobuf/gcc9.patch
+new file mode 100644
+index 0000000000..fbef873a85
+--- /dev/null
++++ b/third_party/protobuf/gcc9.patch
+@@ -0,0 +1,13 @@
++diff --git a/BUILD b/BUILD
++index 19f07c5..25adc4b 100644
++--- a/BUILD
+++++ b/BUILD
++@@ -35,7 +35,7 @@ COPTS = select({
++ "-Wno-sign-compare",
++ "-Wno-unused-function",
++ # Prevents ISO C++ const string assignment warnings for pyext sources.
++- "-Wno-writable-strings",
+++ "-Wno-write-strings",
++ ],
++ })
++
diff --git a/py37.diff b/py37.diff
new file mode 100644
index 000000000000..9fdbc22f93a8
--- /dev/null
+++ b/py37.diff
@@ -0,0 +1,220 @@
+diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
+index eb5b5796c4..c4f6bd5ebd 100644
+--- a/tensorflow/workspace.bzl
++++ b/tensorflow/workspace.bzl
+@@ -690,6 +690,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
+ strip_prefix = "cython-3732784c45cfb040a5b0936951d196f83a12ea17",
+ build_file = clean_dep("//third_party:cython.BUILD"),
+ delete = ["BUILD.bazel"],
++ patch_file = clean_dep("//third_party:py37.patch"),
+ )
+
+ tf_http_archive(
+diff --git a/third_party/py37.patch b/third_party/py37.patch
+new file mode 100644
+index 0000000000..a2f71034c4
+--- /dev/null
++++ b/third_party/py37.patch
+@@ -0,0 +1,202 @@
++diff --git a/Cython/Compiler/ExprNodes.py b/Cython/Compiler/ExprNodes.py
++index ad0a72b..d7ffb9c 100644
++--- a/Cython/Compiler/ExprNodes.py
+++++ b/Cython/Compiler/ExprNodes.py
++@@ -2866,18 +2866,18 @@ class WithExitCallNode(ExprNode):
++ # The __exit__() call of a 'with' statement. Used in both the
++ # except and finally clauses.
++
++- # with_stat WithStatNode the surrounding 'with' statement
++- # args TupleNode or ResultStatNode the exception info tuple
++- # await AwaitExprNode the await expression of an 'async with' statement
+++ # with_stat WithStatNode the surrounding 'with' statement
+++ # args TupleNode or ResultStatNode the exception info tuple
+++ # await_expr AwaitExprNode the await expression of an 'async with' statement
++
++- subexprs = ['args', 'await']
+++ subexprs = ['args', 'await_expr']
++ test_if_run = True
++- await = None
+++ await_expr = None
++
++ def analyse_types(self, env):
++ self.args = self.args.analyse_types(env)
++- if self.await:
++- self.await = self.await.analyse_types(env)
+++ if self.await_expr:
+++ self.await_expr = self.await_expr.analyse_types(env)
++ self.type = PyrexTypes.c_bint_type
++ self.is_temp = True
++ return self
++@@ -2904,12 +2904,12 @@ class WithExitCallNode(ExprNode):
++ code.putln(code.error_goto_if_null(result_var, self.pos))
++ code.put_gotref(result_var)
++
++- if self.await:
+++ if self.await_expr:
++ # FIXME: result_var temp currently leaks into the closure
++- self.await.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
++- code.putln("%s = %s;" % (result_var, self.await.py_result()))
++- self.await.generate_post_assignment_code(code)
++- self.await.free_temps(code)
+++ self.await_expr.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
+++ code.putln("%s = %s;" % (result_var, self.await_expr.py_result()))
+++ self.await_expr.generate_post_assignment_code(code)
+++ self.await_expr.free_temps(code)
++
++ if self.result_is_used:
++ self.allocate_temp_result(code)
++diff --git a/Cython/Compiler/ParseTreeTransforms.py b/Cython/Compiler/ParseTreeTransforms.py
++index 7ee9861..129cd24 100644
++--- a/Cython/Compiler/ParseTreeTransforms.py
+++++ b/Cython/Compiler/ParseTreeTransforms.py
++@@ -1292,7 +1292,7 @@ class WithTransform(CythonTransform, SkipDeclarations):
++ pos, with_stat=node,
++ test_if_run=False,
++ args=excinfo_target,
++- await=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
+++ await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
++ body=Nodes.ReraiseStatNode(pos),
++ ),
++ ],
++@@ -1314,7 +1314,7 @@ class WithTransform(CythonTransform, SkipDeclarations):
++ test_if_run=True,
++ args=ExprNodes.TupleNode(
++ pos, args=[ExprNodes.NoneNode(pos) for _ in range(3)]),
++- await=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
+++ await_expr=ExprNodes.AwaitExprNode(pos, arg=None) if is_async else None)),
++ handle_error_case=False,
++ )
++ return node
++diff --git a/Cython/Utility/Coroutine.c b/Cython/Utility/Coroutine.c
++index f6afa78..4a7d723 100644
++--- a/Cython/Utility/Coroutine.c
+++++ b/Cython/Utility/Coroutine.c
++@@ -1719,13 +1719,20 @@ static void __Pyx__ReturnWithStopIteration(PyObject* value) {
++ Py_INCREF(value);
++ exc = value;
++ }
+++ #if CYTHON_FAST_THREAD_STATE
++ __Pyx_PyThreadState_assign
++- if (!$local_tstate_cname->exc_type) {
+++ #if PY_VERSION_HEX >= 0x030700A2
+++ if (!$local_tstate_cname->exc_state.exc_type)
+++ #else
+++ if (!$local_tstate_cname->exc_type)
+++ #endif
+++ {
++ // no chaining needed => avoid the overhead in PyErr_SetObject()
++ Py_INCREF(PyExc_StopIteration);
++ __Pyx_ErrRestore(PyExc_StopIteration, exc, NULL);
++ return;
++ }
+++ #endif
++ #else
++ args = PyTuple_Pack(1, value);
++ if (unlikely(!args)) return;
++diff --git a/Cython/Utility/Exceptions.c b/Cython/Utility/Exceptions.c
++index 6b891e9..d49a2b2 100644
++--- a/Cython/Utility/Exceptions.c
+++++ b/Cython/Utility/Exceptions.c
++@@ -357,12 +357,21 @@ static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
++ *value = local_value;
++ *tb = local_tb;
++ #if CYTHON_FAST_THREAD_STATE
+++ #if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 7
+++ tmp_type = tstate->exc_state.exc_type;
+++ tmp_value = tstate->exc_state.exc_value;
+++ tmp_tb = tstate->exc_state.exc_traceback;
+++ tstate->exc_state.exc_type = local_type;
+++ tstate->exc_state.exc_value = local_value;
+++ tstate->exc_state.exc_traceback = local_tb;
+++ #else
++ tmp_type = tstate->exc_type;
++ tmp_value = tstate->exc_value;
++ tmp_tb = tstate->exc_traceback;
++ tstate->exc_type = local_type;
++ tstate->exc_value = local_value;
++ tstate->exc_traceback = local_tb;
+++ #endif
++ // Make sure tstate is in a consistent state when we XDECREF
++ // these objects (DECREF may run arbitrary code).
++ Py_XDECREF(tmp_type);
++@@ -392,9 +401,15 @@ static CYTHON_INLINE void __Pyx_ReraiseException(void) {
++ PyObject *type = NULL, *value = NULL, *tb = NULL;
++ #if CYTHON_FAST_THREAD_STATE
++ PyThreadState *tstate = PyThreadState_GET();
+++ #if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 7
+++ type = tstate->exc_state.exc_type;
+++ value = tstate->exc_state.exc_value;
+++ tb = tstate->exc_state.exc_traceback;
+++ #else
++ type = tstate->exc_type;
++ value = tstate->exc_value;
++ tb = tstate->exc_traceback;
+++ #endif
++ #else
++ PyErr_GetExcInfo(&type, &value, &tb);
++ #endif
++@@ -438,9 +453,15 @@ static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject
++
++ #if CYTHON_FAST_THREAD_STATE
++ static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+++ #if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 7
+++ *type = tstate->exc_state.exc_type;
+++ *value = tstate->exc_state.exc_value;
+++ *tb = tstate->exc_state.exc_traceback;
+++ #else
++ *type = tstate->exc_type;
++ *value = tstate->exc_value;
++ *tb = tstate->exc_traceback;
+++ #endif
++ Py_XINCREF(*type);
++ Py_XINCREF(*value);
++ Py_XINCREF(*tb);
++@@ -448,12 +469,22 @@ static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject *
++
++ static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
++ PyObject *tmp_type, *tmp_value, *tmp_tb;
+++
+++ #if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 7
+++ tmp_type = tstate->exc_state.exc_type;
+++ tmp_value = tstate->exc_state.exc_value;
+++ tmp_tb = tstate->exc_state.exc_traceback;
+++ tstate->exc_state.exc_type = type;
+++ tstate->exc_state.exc_value = value;
+++ tstate->exc_state.exc_traceback = tb;
+++ #else
++ tmp_type = tstate->exc_type;
++ tmp_value = tstate->exc_value;
++ tmp_tb = tstate->exc_traceback;
++ tstate->exc_type = type;
++ tstate->exc_value = value;
++ tstate->exc_traceback = tb;
+++ #endif
++ Py_XDECREF(tmp_type);
++ Py_XDECREF(tmp_value);
++ Py_XDECREF(tmp_tb);
++@@ -476,6 +507,16 @@ static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value,
++ #if CYTHON_FAST_THREAD_STATE
++ static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
++ PyObject *tmp_type, *tmp_value, *tmp_tb;
+++
+++ #if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 7
+++ tmp_type = tstate->exc_state.exc_type;
+++ tmp_value = tstate->exc_state.exc_value;
+++ tmp_tb = tstate->exc_state.exc_traceback;
+++
+++ tstate->exc_state.exc_type = *type;
+++ tstate->exc_state.exc_value = *value;
+++ tstate->exc_state.exc_traceback = *tb;
+++ #else
++ tmp_type = tstate->exc_type;
++ tmp_value = tstate->exc_value;
++ tmp_tb = tstate->exc_traceback;
++@@ -484,6 +525,7 @@ static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject *
++ tstate->exc_value = *value;
++ tstate->exc_traceback = *tb;
++
+++ #endif
++ *type = tmp_type;
++ *value = tmp_value;
++ *tb = tmp_tb;
diff --git a/python37.patch b/python37.patch
new file mode 100644
index 000000000000..c917998763be
--- /dev/null
+++ b/python37.patch
@@ -0,0 +1,541 @@
+Support Python 3.7; cherry-picked from:
+https://github.com/tensorflow/tensorflow/commit/abb903df7a5998b33547c02e95f9fa47c00f31f4
+https://github.com/tensorflow/tensorflow/commit/2b0805301e4531dd7c2ed677d932f6408675460e
+https://github.com/tensorflow/tensorflow/pull/21202
+https://github.com/tensorflow/tensorflow/pull/23453
+---
+diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc
+index 81221c4078..50873a07d8 100644
+--- a/tensorflow/c/eager/c_api.cc
++++ b/tensorflow/c/eager/c_api.cc
+@@ -46,6 +46,7 @@ limitations under the License.
+ #include "tensorflow/core/framework/tensor_shape.pb.h"
+ #include "tensorflow/core/framework/types.h"
+ #include "tensorflow/core/lib/core/refcount.h"
++#include "tensorflow/core/lib/core/stringpiece.h"
+ #include "tensorflow/core/lib/gtl/cleanup.h"
+ #include "tensorflow/core/lib/gtl/flatmap.h"
+ #include "tensorflow/core/lib/gtl/map_util.h"
+@@ -200,8 +201,8 @@ void TFE_ContextOptionsSetConfig(TFE_ContextOptions* options, const void* proto,
+ }
+
+ void TFE_ContextOptionsSetAsync(TFE_ContextOptions* options,
+- unsigned char async) {
+- options->async = async;
++ unsigned char enable) {
++ options->async = enable;
+ }
+ void TFE_ContextOptionsSetDevicePlacementPolicy(
+ TFE_ContextOptions* options, TFE_ContextDevicePlacementPolicy policy) {
+@@ -218,9 +219,9 @@ TF_CAPI_EXPORT extern void TFE_ContextOptionsSetServerDef(
+ }
+
+ TF_CAPI_EXPORT extern void TFE_ContextSetAsyncForThread(TFE_Context* ctx,
+- unsigned char async,
++ unsigned char enable,
+ TF_Status* status) {
+- status->status = ctx->context.SetAsyncForThread(async);
++ status->status = ctx->context.SetAsyncForThread(enable);
+ }
+
+ void TFE_DeleteContextOptions(TFE_ContextOptions* options) { delete options; }
+@@ -421,8 +422,11 @@ TF_AttrType TFE_OpNameGetAttrType(TFE_Context* ctx,
+ return ret;
+ }
+
+-void TFE_OpSetAttrString(TFE_Op* op, const char* attr_name, const char* value) {
+- op->operation.MutableAttrs()->Set(attr_name, value);
++void TFE_OpSetAttrString(TFE_Op* op, const char* attr_name, const void* value,
++ size_t length) {
++ op->operation.MutableAttrs()->Set(
++ attr_name,
++ tensorflow::StringPiece(static_cast<const char*>(value), length));
+ }
+
+ void TFE_OpSetAttrInt(TFE_Op* op, const char* attr_name, int64_t value) {
+@@ -473,16 +477,22 @@ void TFE_OpSetAttrFunction(TFE_Op* op, const char* attr_name,
+ op->operation.MutableAttrs()->Set(attr_name, attr_value);
+ }
+
+-#define TFE_OP_SET_ATTR_LIST(fn, type) \
+- void fn(TFE_Op* op, const char* attr_name, const type* values, \
+- int num_values) { \
+- op->operation.MutableAttrs()->Set( \
+- attr_name, \
+- tensorflow::gtl::ArraySlice<const type>(values, num_values)); \
++void TFE_OpSetAttrStringList(TFE_Op* op, const char* attr_name,
++ const void* const* values, const size_t* lengths,
++ int num_values) {
++ std::vector<tensorflow::StringPiece> v(num_values);
++ for (int i = 0; i < num_values; ++i) {
++ v[i] = tensorflow::StringPiece(static_cast<const char*>(values[i]),
++ lengths[i]);
+ }
+-TFE_OP_SET_ATTR_LIST(TFE_OpSetAttrStringList, char*)
+-TFE_OP_SET_ATTR_LIST(TFE_OpSetAttrFloatList, float)
+-#undef TFE_OP_SET_ATTR_LIST
++ op->operation.MutableAttrs()->Set(attr_name, v);
++}
++
++void TFE_OpSetAttrFloatList(TFE_Op* op, const char* attr_name,
++ const float* values, int num_values) {
++ op->operation.MutableAttrs()->Set(
++ attr_name, tensorflow::gtl::ArraySlice<const float>(values, num_values));
++}
+
+ void TFE_OpSetAttrIntList(TFE_Op* op, const char* attr_name,
+ const int64_t* values, int num_values) {
+@@ -655,9 +665,11 @@ void SetOpAttrValueScalar(TFE_Context* ctx, TFE_Op* op,
+ const tensorflow::AttrValue& default_value,
+ const char* attr_name, TF_Status* status) {
+ switch (default_value.value_case()) {
+- case tensorflow::AttrValue::kS:
+- TFE_OpSetAttrString(op, attr_name, default_value.s().data());
++ case tensorflow::AttrValue::kS: {
++ const string& v = default_value.s();
++ TFE_OpSetAttrString(op, attr_name, v.data(), v.size());
+ break;
++ }
+ case tensorflow::AttrValue::kI:
+ TFE_OpSetAttrInt(op, attr_name, static_cast<int64_t>(default_value.i()));
+ break;
+diff --git a/tensorflow/c/eager/c_api.h b/tensorflow/c/eager/c_api.h
+index 1862af3ce2..8c6bc63c9b 100644
+--- a/tensorflow/c/eager/c_api.h
++++ b/tensorflow/c/eager/c_api.h
+@@ -76,7 +76,7 @@ typedef enum TFE_ContextDevicePlacementPolicy {
+ // Sets the default execution mode (sync/async). Note that this can be
+ // overridden per thread using TFE_ContextSetAsyncForThread.
+ TF_CAPI_EXPORT extern void TFE_ContextOptionsSetAsync(TFE_ContextOptions*,
+- unsigned char async);
++ unsigned char enable);
+
+ TF_CAPI_EXPORT extern void TFE_ContextOptionsSetDevicePlacementPolicy(
+ TFE_ContextOptions*, TFE_ContextDevicePlacementPolicy);
+@@ -125,7 +125,7 @@ TFE_ContextGetDevicePlacementPolicy(TFE_Context*);
+
+ // Overrides the execution mode (sync/async) for the current thread.
+ TF_CAPI_EXPORT extern void TFE_ContextSetAsyncForThread(TFE_Context*,
+- unsigned char async,
++ unsigned char enable,
+ TF_Status* status);
+
+ // Causes the calling thread to block till all ops dispatched in async mode
+@@ -278,7 +278,8 @@ TF_CAPI_EXPORT extern TF_AttrType TFE_OpNameGetAttrType(
+
+ TF_CAPI_EXPORT extern void TFE_OpSetAttrString(TFE_Op* op,
+ const char* attr_name,
+- const char* value);
++ const void* value,
++ size_t length);
+ TF_CAPI_EXPORT extern void TFE_OpSetAttrInt(TFE_Op* op, const char* attr_name,
+ int64_t value);
+ TF_CAPI_EXPORT extern void TFE_OpSetAttrFloat(TFE_Op* op, const char* attr_name,
+@@ -305,7 +306,8 @@ TF_CAPI_EXPORT extern void TFE_OpSetAttrFunction(TFE_Op* op,
+
+ TF_CAPI_EXPORT extern void TFE_OpSetAttrStringList(TFE_Op* op,
+ const char* attr_name,
+- const char** value,
++ const void* const* values,
++ const size_t* lengths,
+ int num_values);
+ TF_CAPI_EXPORT extern void TFE_OpSetAttrIntList(TFE_Op* op,
+ const char* attr_name,
+diff --git a/tensorflow/c/eager/c_api_test.cc b/tensorflow/c/eager/c_api_test.cc
+index 27ff5f7211..b796246747 100644
+--- a/tensorflow/c/eager/c_api_test.cc
++++ b/tensorflow/c/eager/c_api_test.cc
+@@ -1083,8 +1083,8 @@ TFE_TensorHandle* CreateVariable(TFE_Context* ctx, float value,
+ if (TF_GetCode(status) != TF_OK) return nullptr;
+ TFE_OpSetAttrType(op, "dtype", TF_FLOAT);
+ TFE_OpSetAttrShape(op, "shape", {}, 0, status);
+- TFE_OpSetAttrString(op, "container", "");
+- TFE_OpSetAttrString(op, "shared_name", "");
++ TFE_OpSetAttrString(op, "container", "", 0);
++ TFE_OpSetAttrString(op, "shared_name", "", 0);
+ if (TF_GetCode(status) != TF_OK) return nullptr;
+ TFE_TensorHandle* var_handle = nullptr;
+ int num_retvals = 1;
+diff --git a/tensorflow/python/eager/pywrap_tfe.h b/tensorflow/python/eager/pywrap_tfe.h
+index a916a75f00..823c4078b8 100644
+--- a/tensorflow/python/eager/pywrap_tfe.h
++++ b/tensorflow/python/eager/pywrap_tfe.h
+@@ -89,7 +89,7 @@ int MaybeRaiseExceptionFromStatus(const tensorflow::Status& status,
+ PyObject* exception);
+
+ // Returns the string associated with the passed-in python object.
+-char* TFE_GetPythonString(PyObject* o);
++const char* TFE_GetPythonString(PyObject* o);
+
+ // Returns a unique id on each call.
+ int64_t get_uid();
+diff --git a/tensorflow/python/eager/pywrap_tfe_src.cc b/tensorflow/python/eager/pywrap_tfe_src.cc
+index 6c9481c3af..9cf428da99 100644
+--- a/tensorflow/python/eager/pywrap_tfe_src.cc
++++ b/tensorflow/python/eager/pywrap_tfe_src.cc
+@@ -205,14 +205,20 @@ bool ParseDimensionValue(const string& key, PyObject* py_value,
+ }
+
+ bool ParseStringValue(const string& key, PyObject* py_value, TF_Status* status,
+- const char** value) {
++ tensorflow::StringPiece* value) {
+ if (PyBytes_Check(py_value)) {
+- *value = PyBytes_AsString(py_value);
++ Py_ssize_t size = 0;
++ char* buf = nullptr;
++ if (PyBytes_AsStringAndSize(py_value, &buf, &size) < 0) return false;
++ *value = tensorflow::StringPiece(buf, size);
+ return true;
+ }
+ #if PY_MAJOR_VERSION >= 3
+ if (PyUnicode_Check(py_value)) {
+- *value = PyUnicode_AsUTF8(py_value);
++ Py_ssize_t size = 0;
++ const char* buf = PyUnicode_AsUTF8AndSize(py_value, &size);
++ if (buf == nullptr) return false;
++ *value = tensorflow::StringPiece(buf, size);
+ return true;
+ }
+ #endif
+@@ -275,8 +281,16 @@ bool SetOpAttrList(
+ }
+
+ if (type == TF_ATTR_STRING) {
+- PARSE_LIST(const char*, ParseStringValue);
+- TFE_OpSetAttrStringList(op, key, values.get(), num_values);
++ std::unique_ptr<const void*[]> values(new const void*[num_values]);
++ std::unique_ptr<size_t[]> lengths(new size_t[num_values]);
++ for (int i = 0; i < num_values; ++i) {
++ tensorflow::StringPiece value;
++ tensorflow::Safe_PyObjectPtr py_value(PySequence_ITEM(py_list, i));
++ if (!ParseStringValue(key, py_value.get(), status, &value)) return false;
++ values[i] = value.data();
++ lengths[i] = value.size();
++ }
++ TFE_OpSetAttrStringList(op, key, values.get(), lengths.get(), num_values);
+ } else if (type == TF_ATTR_INT) {
+ PARSE_LIST(int64_t, ParseInt64Value);
+ TFE_OpSetAttrIntList(op, key, values.get(), num_values);
+@@ -379,12 +393,15 @@ void SetOpAttrListDefault(
+ TF_Status* status) {
+ if (type == TF_ATTR_STRING) {
+ int num_values = attr.default_value().list().s_size();
+- std::unique_ptr<const char*[]> values(new const char*[num_values]);
++ std::unique_ptr<const void*[]> values(new const void*[num_values]);
++ std::unique_ptr<size_t[]> lengths(new size_t[num_values]);
+ (*attr_list_sizes)[key] = num_values;
+ for (int i = 0; i < num_values; i++) {
+- values[i] = attr.default_value().list().s(i).data();
++ const string& v = attr.default_value().list().s(i);
++ values[i] = v.data();
++ lengths[i] = v.size();
+ }
+- TFE_OpSetAttrStringList(op, key, values.get(), num_values);
++ TFE_OpSetAttrStringList(op, key, values.get(), lengths.get(), num_values);
+ } else if (type == TF_ATTR_INT) {
+ int num_values = attr.default_value().list().i_size();
+ std::unique_ptr<int64_t[]> values(new int64_t[num_values]);
+@@ -470,9 +487,9 @@ bool SetOpAttrScalar(
+ tensorflow::gtl::FlatMap<string, tensorflow::int64>* attr_list_sizes,
+ TF_Status* status) {
+ if (type == TF_ATTR_STRING) {
+- const char* value;
++ tensorflow::StringPiece value;
+ if (!ParseStringValue(key, py_value, status, &value)) return false;
+- TFE_OpSetAttrString(op, key, value);
++ TFE_OpSetAttrString(op, key, value.data(), value.size());
+ } else if (type == TF_ATTR_INT) {
+ int64_t value;
+ if (!ParseInt64Value(key, py_value, status, &value)) return false;
+@@ -533,7 +550,7 @@ bool SetOpAttrScalar(
+ // (which is what the various "defun" or "Defun" decorators do).
+ // And in the future also allow an object that can encapsulate
+ // the function name and its attribute values.
+- const char* func_name = nullptr;
++ tensorflow::StringPiece func_name;
+ if (!ParseStringValue(key, py_value, status, &func_name)) {
+ PyObject* name_attr = PyObject_GetAttrString(py_value, "name");
+ if (name_attr == nullptr ||
+@@ -549,7 +566,8 @@ bool SetOpAttrScalar(
+ return false;
+ }
+ }
+- TFE_Op* func = TFE_NewOp(ctx, func_name, status);
++ TFE_Op* func = TFE_NewOp(
++ ctx, string(func_name.data(), func_name.size()).c_str(), status);
+ if (TF_GetCode(status) != TF_OK) return false;
+ TFE_OpSetAttrFunction(op, key, func);
+ TFE_DeleteOp(func);
+@@ -807,7 +825,7 @@ int MaybeRaiseExceptionFromStatus(const tensorflow::Status& status,
+ return -1;
+ }
+
+-char* TFE_GetPythonString(PyObject* o) {
++const char* TFE_GetPythonString(PyObject* o) {
+ if (PyBytes_Check(o)) {
+ return PyBytes_AsString(o);
+ }
+diff --git a/tensorflow/python/lib/core/ndarray_tensor.cc b/tensorflow/python/lib/core/ndarray_tensor.cc
+index 9df38d464c..f87674a524 100644
+--- a/tensorflow/python/lib/core/ndarray_tensor.cc
++++ b/tensorflow/python/lib/core/ndarray_tensor.cc
+@@ -136,6 +136,31 @@ Status PyArray_TYPE_to_TF_DataType(PyArrayObject* array,
+ return Status::OK();
+ }
+
++Status PyObjectToString(PyObject* obj, const char** ptr, Py_ssize_t* len) {
++ if (!PyUnicode_Check(obj)) {
++ char* buf;
++ if (PyBytes_AsStringAndSize(obj, &buf, len) != 0) {
++ return errors::Internal("Unable to get element as bytes.");
++ }
++ *ptr = buf;
++ return Status::OK();
++ }
++#if (PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 3))
++ *ptr = PyUnicode_AsUTF8AndSize(obj, len);
++ if (*ptr != nullptr) return Status::OK();
++#else
++ PyObject* utemp = PyUnicode_AsUTF8String(obj);
++ char* buf;
++ if (utemp != nullptr && PyBytes_AsStringAndSize(utemp, &buf, len) != -1) {
++ *ptr = buf;
++ Py_DECREF(utemp);
++ return Status::OK();
++ }
++ Py_XDECREF(utemp);
++#endif
++ return errors::Internal("Unable to convert element to UTF-8.");
++}
++
+ // Iterate over the string array 'array', extract the ptr and len of each string
+ // element and call f(ptr, len).
+ template <typename F>
+@@ -148,33 +173,10 @@ Status PyBytesArrayMap(PyArrayObject* array, F f) {
+ if (!item) {
+ return errors::Internal("Unable to get element from the feed - no item.");
+ }
+- char* ptr;
+ Py_ssize_t len;
+-
+- if (PyUnicode_Check(item.get())) {
+-#if PY_VERSION_HEX >= 0x03030000
+- // Accept unicode by converting to UTF-8 bytes.
+- ptr = PyUnicode_AsUTF8AndSize(item.get(), &len);
+- if (!ptr) {
+- return errors::Internal("Unable to get element as UTF-8.");
+- }
+- f(ptr, len);
+-#else
+- PyObject* utemp = PyUnicode_AsUTF8String(item.get());
+- if (!utemp || PyBytes_AsStringAndSize(utemp, &ptr, &len) == -1) {
+- Py_XDECREF(utemp);
+- return errors::Internal("Unable to convert element to UTF-8.");
+- }
+- f(ptr, len);
+- Py_DECREF(utemp);
+-#endif
+- } else {
+- int success = PyBytes_AsStringAndSize(item.get(), &ptr, &len);
+- if (success != 0) {
+- return errors::Internal("Unable to get element as bytes.");
+- }
+- f(ptr, len);
+- }
++ const char* ptr;
++ TF_RETURN_IF_ERROR(PyObjectToString(item.get(), &ptr, &len));
++ f(ptr, len);
+ PyArray_ITER_NEXT(iter.get());
+ }
+ return Status::OK();
+@@ -186,10 +188,11 @@ Status EncodePyBytesArray(PyArrayObject* array, tensorflow::int64 nelems,
+ size_t* size, void** buffer) {
+ // Compute bytes needed for encoding.
+ *size = 0;
+- TF_RETURN_IF_ERROR(PyBytesArrayMap(array, [&size](char* ptr, Py_ssize_t len) {
+- *size +=
+- sizeof(tensorflow::uint64) + tensorflow::core::VarintLength(len) + len;
+- }));
++ TF_RETURN_IF_ERROR(
++ PyBytesArrayMap(array, [&size](const char* ptr, Py_ssize_t len) {
++ *size += sizeof(tensorflow::uint64) +
++ tensorflow::core::VarintLength(len) + len;
++ }));
+ // Encode all strings.
+ std::unique_ptr<char[]> base_ptr(new char[*size]);
+ char* base = base_ptr.get();
+@@ -198,7 +201,7 @@ Status EncodePyBytesArray(PyArrayObject* array, tensorflow::int64 nelems,
+ tensorflow::uint64* offsets = reinterpret_cast<tensorflow::uint64*>(base);
+
+ TF_RETURN_IF_ERROR(PyBytesArrayMap(
+- array, [&base, &data_start, &dst, &offsets](char* ptr, Py_ssize_t len) {
++ array, [&data_start, &dst, &offsets](const char* ptr, Py_ssize_t len) {
+ *offsets = (dst - data_start);
+ offsets++;
+ dst = tensorflow::core::EncodeVarint64(dst, len);
+diff --git a/tensorflow/python/lib/core/py_func.cc b/tensorflow/python/lib/core/py_func.cc
+index a13fdbb57c..a7f8065fb3 100644
+--- a/tensorflow/python/lib/core/py_func.cc
++++ b/tensorflow/python/lib/core/py_func.cc
+@@ -303,6 +303,35 @@ class NumpyTensorBuffer : public TensorBuffer {
+ void* data_;
+ };
+
++Status PyObjectToString(PyObject* obj, string* str) {
++ char* py_bytes;
++ Py_ssize_t size;
++ if (PyBytes_AsStringAndSize(obj, &py_bytes, &size) != -1) {
++ str->assign(py_bytes, size);
++ return Status::OK();
++ }
++#if PY_MAJOR_VERSION >= 3
++ const char* ptr = PyUnicode_AsUTF8AndSize(obj, &size);
++ if (ptr != nullptr) {
++ str->assign(ptr, size);
++ return Status::OK();
++ }
++#else
++ if (PyUnicode_Check(obj)) {
++ PyObject* unicode = PyUnicode_AsUTF8String(obj);
++ char* ptr;
++ if (unicode && PyString_AsStringAndSize(unicode, &ptr, &size) != -1) {
++ str->assign(ptr, size);
++ Py_DECREF(unicode);
++ return Status::OK();
++ }
++ Py_XDECREF(unicode);
++ }
++#endif
++ return errors::Unimplemented("Unsupported object type ",
++ obj->ob_type->tp_name);
++}
++
+ Status ConvertNdarrayToTensor(PyObject* obj, Tensor* ret) {
+ PyArrayObject* input = reinterpret_cast<PyArrayObject*>(obj);
+ DataType dtype = DT_INVALID;
+@@ -318,29 +347,7 @@ Status ConvertNdarrayToTensor(PyObject* obj, Tensor* ret) {
+ auto tflat = t.flat<string>();
+ PyObject** input_data = reinterpret_cast<PyObject**>(PyArray_DATA(input));
+ for (int i = 0; i < tflat.dimension(0); ++i) {
+- char* el;
+- Py_ssize_t el_size;
+- if (PyBytes_AsStringAndSize(input_data[i], &el, &el_size) == -1) {
+-#if PY_MAJOR_VERSION >= 3
+- el = PyUnicode_AsUTF8AndSize(input_data[i], &el_size);
+-#else
+- el = nullptr;
+- if (PyUnicode_Check(input_data[i])) {
+- PyObject* unicode = PyUnicode_AsUTF8String(input_data[i]);
+- if (unicode) {
+- if (PyString_AsStringAndSize(unicode, &el, &el_size) == -1) {
+- Py_DECREF(unicode);
+- el = nullptr;
+- }
+- }
+- }
+-#endif
+- if (!el) {
+- return errors::Unimplemented("Unsupported object type ",
+- input_data[i]->ob_type->tp_name);
+- }
+- }
+- tflat(i) = string(el, el_size);
++ TF_RETURN_IF_ERROR(PyObjectToString(input_data[i], &tflat(i)));
+ }
+ *ret = t;
+ break;
+diff --git a/tensorflow/python/pywrap_tfe.i b/tensorflow/python/pywrap_tfe.i
+index 500dc30cc3..eb627d3ecc 100644
+--- a/tensorflow/python/pywrap_tfe.i
++++ b/tensorflow/python/pywrap_tfe.i
+@@ -102,20 +102,29 @@ limitations under the License.
+ }
+ }
+
++// For const parameters in a function, SWIG pretty much ignores the const.
++// See: http://www.swig.org/Doc2.0/SWIG.html#SWIG_nn13
++// Hence the 'const_cast'.
+ %typemap(in) const char* serialized_function_def {
+- $1 = TFE_GetPythonString($input);
++ $1 = const_cast<char*>(TFE_GetPythonString($input));
+ }
+
++// For const parameters in a function, SWIG pretty much ignores the const.
++// See: http://www.swig.org/Doc2.0/SWIG.html#SWIG_nn13
++// Hence the 'const_cast'.
+ %typemap(in) const char* device_name {
+ if ($input == Py_None) {
+ $1 = nullptr;
+ } else {
+- $1 = TFE_GetPythonString($input);
++ $1 = const_cast<char*>(TFE_GetPythonString($input));
+ }
+ }
+
++// For const parameters in a function, SWIG pretty much ignores the const.
++// See: http://www.swig.org/Doc2.0/SWIG.html#SWIG_nn13
++// Hence the 'const_cast'.
+ %typemap(in) const char* op_name {
+- $1 = TFE_GetPythonString($input);
++ $1 = const_cast<char*>(TFE_GetPythonString($input));
+ }
+
+ %typemap(in) (TFE_Context*) {
+diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
+index ea8c727e4d..44a95d6bad 100644
+--- a/tensorflow/workspace.bzl
++++ b/tensorflow/workspace.bzl
+@@ -358,14 +358,18 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
+ },
+ )
+
++ PROTOBUF_urls =[
++ "https://mirror.bazel.build/github.com/protocolbuffers/protobuf/archive/v3.6.1.2.tar.gz",
++ "https://github.com/protocolbuffers/protobuf/archive/v3.6.1.2.tar.gz",
++ ]
++ PROTOBUF_sha256 = "2244b0308846bb22b4ff0bcc675e99290ff9f1115553ae9671eba1030af31bc0"
++ PROTOBUF_strip_prefix = "protobuf-3.6.1.2"
++
+ tf_http_archive(
+ name = "protobuf_archive",
+- urls = [
+- "https://mirror.bazel.build/github.com/google/protobuf/archive/396336eb961b75f03b25824fe86cf6490fb75e3a.tar.gz",
+- "https://github.com/google/protobuf/archive/396336eb961b75f03b25824fe86cf6490fb75e3a.tar.gz",
+- ],
+- sha256 = "846d907acf472ae233ec0882ef3a2d24edbbe834b80c305e867ac65a1f2c59e3",
+- strip_prefix = "protobuf-396336eb961b75f03b25824fe86cf6490fb75e3a",
++ urls = PROTOBUF_urls,
++ sha256 = PROTOBUF_sha256,
++ strip_prefix = PROTOBUF_strip_prefix,
+ )
+
+ # We need to import the protobuf library under the names com_google_protobuf
+@@ -373,22 +377,16 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
+ # Unfortunately there is no way to alias http_archives at the moment.
+ tf_http_archive(
+ name = "com_google_protobuf",
+- urls = [
+- "https://mirror.bazel.build/github.com/google/protobuf/archive/396336eb961b75f03b25824fe86cf6490fb75e3a.tar.gz",
+- "https://github.com/google/protobuf/archive/396336eb961b75f03b25824fe86cf6490fb75e3a.tar.gz",
+- ],
+- sha256 = "846d907acf472ae233ec0882ef3a2d24edbbe834b80c305e867ac65a1f2c59e3",
+- strip_prefix = "protobuf-396336eb961b75f03b25824fe86cf6490fb75e3a",
++ urls = PROTOBUF_urls,
++ sha256 = PROTOBUF_sha256,
++ strip_prefix = PROTOBUF_strip_prefix,
+ )
+
+ tf_http_archive(
+ name = "com_google_protobuf_cc",
+- urls = [
+- "https://mirror.bazel.build/github.com/google/protobuf/archive/396336eb961b75f03b25824fe86cf6490fb75e3a.tar.gz",
+- "https://github.com/google/protobuf/archive/396336eb961b75f03b25824fe86cf6490fb75e3a.tar.gz",
+- ],
+- sha256 = "846d907acf472ae233ec0882ef3a2d24edbbe834b80c305e867ac65a1f2c59e3",
+- strip_prefix = "protobuf-396336eb961b75f03b25824fe86cf6490fb75e3a",
++ urls = PROTOBUF_urls,
++ sha256 = PROTOBUF_sha256,
++ strip_prefix = PROTOBUF_strip_prefix,
+ )
+
+ tf_http_archive(