summary | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    drrossum  2016-08-03 08:07:51 -0500
committer drrossum  2016-08-03 08:07:51 -0500
commit    6c4060b1a2cc0723c20932e1b81fa52bb80dd38c (patch)
tree      888a1b29b52f58b5b651a5b99166f57ddceb04a5
parent    3c7c0c99fa7cf983b82326eab8e9c9dc6fde72d8 (diff)
download  aur-linux-t460s.tar.gz
update hibernation patches and bump to 4.6.5
-rw-r--r--  .SRCINFO                                                                                                                 24
-rw-r--r--  PKGBUILD                                                                                                                 23
-rw-r--r--  t460s_hibernate.patch                                                                                                   181
-rw-r--r--  x86-hibernate-Use-hlt_play_dead-when-resuming-from-hibernation.patch                                                    101
-rw-r--r--  x86-power-64-Fix-kernel-text-mapping-corruption-during-image-restoration-was-Re-ktime_get_ts64-splat-during-resume_v5.patch  265
5 files changed, 390 insertions, 204 deletions
diff --git a/.SRCINFO b/.SRCINFO
index 8761b24f7d34..d8cf4bc802d5 100644
--- a/.SRCINFO
+++ b/.SRCINFO
@@ -1,7 +1,7 @@
# Generated by mksrcinfo v8
-# Tue Jun 28 09:28:47 UTC 2016
+# Wed Aug 3 13:05:27 UTC 2016
pkgbase = linux-t460s
- pkgver = 4.6.3
+ pkgver = 4.6.5
pkgrel = 1
url = http://www.kernel.org/
arch = i686
@@ -16,24 +16,24 @@ pkgbase = linux-t460s
options = !strip
source = https://www.kernel.org/pub/linux/kernel/v4.x/linux-4.6.tar.xz
source = https://www.kernel.org/pub/linux/kernel/v4.x/linux-4.6.tar.sign
- source = https://www.kernel.org/pub/linux/kernel/v4.x/patch-4.6.3.xz
- source = https://www.kernel.org/pub/linux/kernel/v4.x/patch-4.6.3.sign
+ source = https://www.kernel.org/pub/linux/kernel/v4.x/patch-4.6.5.xz
+ source = https://www.kernel.org/pub/linux/kernel/v4.x/patch-4.6.5.sign
source = config
source = config.x86_64
source = linux.preset
source = change-default-console-loglevel.patch
- source = 0001-linux-4.6-rtlwifi-fix-atomic.patch
- source = t460s_hibernate.patch
+ source = x86-hibernate-Use-hlt_play_dead-when-resuming-from-hibernation.patch
+ source = x86-power-64-Fix-kernel-text-mapping-corruption-during-image-restoration-was-Re-ktime_get_ts64-splat-during-resume_v5.patch
sha256sums = a93771cd5a8ad27798f22e9240538dfea48d3a2bf2a6a6ab415de3f02d25d866
sha256sums = SKIP
- sha256sums = 036f83f8a3475d9e7e0b8edc188f9a4f495abc3b187ed87748cdbc063c0c419f
+ sha256sums = 857df33f085a0116b9d2322ffe3b23d5b7d8c4898427d79f68108a653e84910c
sha256sums = SKIP
sha256sums = 02e8b02e8cd10aa059917a489a9663e7f66bdf12c5ae8a1e0369bb2862da6b68
sha256sums = d59014b8f887c6aa9488ef5ff9bc5d4357850a979f3ff90a2999bbe24e5c6e15
sha256sums = f0d90e756f14533ee67afda280500511a62465b4f76adcc5effa95a40045179c
sha256sums = 1256b241cd477b265a3c2d64bdc19ffe3c9bbcee82ea3994c590c2c76e767d99
- sha256sums = ae0d16e81a915fae130125ba9d0b6fd2427e06f50b8b9514abc4029efe61ee98
- sha256sums = cbd0f6c59d7b152e36f69b5ac96f56e659aa13be8491205bdb933255de62463f
+ sha256sums = 9bc79ac0c9856cee18046bc5af587dc947b772621819c2ec6606b0c6e78f817f
+ sha256sums = 59ea6bf168d377d5a6a1df9b6def8e6d5c940c80dbc308a5334804bb3fb35fc3
pkgname = linux-t460s
pkgdesc = The Linux-t460s kernel and modules, includes patches for Thinkpad T460s support
@@ -43,14 +43,14 @@ pkgname = linux-t460s
depends = kmod
depends = mkinitcpio>=0.7
optdepends = crda: to set the correct wireless channels of your country
- provides = linux=4.6.3
+ provides = linux=4.6.5
backup = etc/mkinitcpio.d/linux-t460s.preset
pkgname = linux-t460s-headers
pkgdesc = Header files and scripts for building modules for Linux-t460s kernel
- provides = linux-headers=4.6.3
+ provides = linux-headers=4.6.5
pkgname = linux-t460s-docs
pkgdesc = Kernel hackers manual - HTML documentation that comes with the Linux-t460s kernel
- provides = linux-docs=4.6.3
+ provides = linux-docs=4.6.5
diff --git a/PKGBUILD b/PKGBUILD
index 7af94ae60889..acd5b82875e5 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -5,7 +5,7 @@
pkgbase=linux-t460s # Build stock -ARCH kernel
#pkgbase=linux-custom # Build kernel with a different name
_srcname=linux-4.6
-pkgver=4.6.3
+pkgver=4.6.5
pkgrel=1
arch=('i686' 'x86_64')
url="http://www.kernel.org/"
@@ -21,19 +21,21 @@ source=("https://www.kernel.org/pub/linux/kernel/v4.x/${_srcname}.tar.xz"
# standard config files for mkinitcpio ramdisk
'linux.preset'
'change-default-console-loglevel.patch'
- '0001-linux-4.6-rtlwifi-fix-atomic.patch'
-'t460s_hibernate.patch')
+'x86-hibernate-Use-hlt_play_dead-when-resuming-from-hibernation.patch'
+'x86-power-64-Fix-kernel-text-mapping-corruption-during-image-restoration-was-Re-ktime_get_ts64-splat-during-resume_v5.patch')
+#'nonboot-cpu.patch'
+#'t460s_hibernate.patch')
sha256sums=('a93771cd5a8ad27798f22e9240538dfea48d3a2bf2a6a6ab415de3f02d25d866'
'SKIP'
- '036f83f8a3475d9e7e0b8edc188f9a4f495abc3b187ed87748cdbc063c0c419f'
+ '857df33f085a0116b9d2322ffe3b23d5b7d8c4898427d79f68108a653e84910c'
'SKIP'
'02e8b02e8cd10aa059917a489a9663e7f66bdf12c5ae8a1e0369bb2862da6b68'
'd59014b8f887c6aa9488ef5ff9bc5d4357850a979f3ff90a2999bbe24e5c6e15'
'f0d90e756f14533ee67afda280500511a62465b4f76adcc5effa95a40045179c'
'1256b241cd477b265a3c2d64bdc19ffe3c9bbcee82ea3994c590c2c76e767d99'
- 'ae0d16e81a915fae130125ba9d0b6fd2427e06f50b8b9514abc4029efe61ee98'
- 'cbd0f6c59d7b152e36f69b5ac96f56e659aa13be8491205bdb933255de62463f')
+ '9bc79ac0c9856cee18046bc5af587dc947b772621819c2ec6606b0c6e78f817f'
+ '59ea6bf168d377d5a6a1df9b6def8e6d5c940c80dbc308a5334804bb3fb35fc3')
validpgpkeys=(
'ABAF11C65A2970B130ABE3C479BE3E4300411886' # Linus Torvalds
'647F28654894E3BD457199BE38DBBDC86092693E' # Greg Kroah-Hartman
@@ -52,17 +54,16 @@ prepare() {
# fix T460s hibernation bug
# https://patchwork.kernel.org/patch/9158227/
- patch --verbose -p1 -i "${srcdir}/t460s_hibernate.patch"
+ #patch --verbose -p1 -i "${srcdir}/t460s_hibernate.patch"
+ #patch --verbose -p1 -i "${srcdir}/nonboot-cpu.patch"
+ patch --verbose -p1 -i "${srcdir}/x86-hibernate-Use-hlt_play_dead-when-resuming-from-hibernation.patch"
+ patch --verbose -p1 -i "${srcdir}/x86-power-64-Fix-kernel-text-mapping-corruption-during-image-restoration-was-Re-ktime_get_ts64-splat-during-resume_v5.patch"
# set DEFAULT_CONSOLE_LOGLEVEL to 4 (same value as the 'quiet' kernel param)
# remove this when a Kconfig knob is made available by upstream
# (relevant patch sent upstream: https://lkml.org/lkml/2011/7/26/227)
patch -p1 -i "${srcdir}/change-default-console-loglevel.patch"
- # fix rtlwifi atomic
- # https://bugs.archlinux.org/task/49401
- patch -p1 -i "${srcdir}/0001-linux-4.6-rtlwifi-fix-atomic.patch"
-
if [ "${CARCH}" = "x86_64" ]; then
cat "${srcdir}/config.x86_64" > ./.config
else
diff --git a/t460s_hibernate.patch b/t460s_hibernate.patch
deleted file mode 100644
index 7ef533f0ff9b..000000000000
--- a/t460s_hibernate.patch
+++ /dev/null
@@ -1,181 +0,0 @@
-Index: linux-pm/arch/x86/power/hibernate_64.c
-===================================================================
---- linux-pm.orig/arch/x86/power/hibernate_64.c
-+++ linux-pm/arch/x86/power/hibernate_64.c
-@@ -27,7 +27,8 @@ extern asmlinkage __visible int restore_
- * Address to jump to in the last phase of restore in order to get to the image
- * kernel's text (this value is passed in the image header).
- */
--unsigned long restore_jump_address __visible;
-+void *restore_jump_address __visible;
-+unsigned long jump_address_phys;
-
- /*
- * Value of the cr3 register from before the hibernation (this value is passed
-@@ -37,8 +38,51 @@ unsigned long restore_cr3 __visible;
-
- pgd_t *temp_level4_pgt __visible;
-
-+void *restore_pgd_addr __visible;
-+pgd_t restore_pgd __visible;
-+
- void *relocated_restore_code __visible;
-
-+static int prepare_temporary_text_mapping(void)
-+{
-+ unsigned long vaddr = (unsigned long)restore_jump_address;
-+ unsigned long paddr = jump_address_phys & PMD_MASK;
-+ pmd_t *pmd;
-+ pud_t *pud;
-+
-+ /*
-+ * The new mapping only has to cover the page containing the image
-+ * kernel's entry point (jump_address_phys), because the switch over to
-+ * it is carried out by relocated code running from a page allocated
-+ * specifically for this purpose and covered by the identity mapping, so
-+ * the temporary kernel text mapping is only needed for the final jump.
-+ * However, in that mapping the virtual address of the image kernel's
-+ * entry point must be the same as its virtual address in the image
-+ * kernel (restore_jump_address), so the image kernel's
-+ * restore_registers() code doesn't find itself in a different area of
-+ * the virtual address space after switching over to the original page
-+ * tables used by the image kernel.
-+ */
-+ pud = (pud_t *)get_safe_page(GFP_ATOMIC);
-+ if (!pud)
-+ return -ENOMEM;
-+
-+ restore_pgd = __pgd(__pa(pud) | _KERNPG_TABLE);
-+
-+ pud += pud_index(vaddr);
-+ pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
-+ if (!pmd)
-+ return -ENOMEM;
-+
-+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
-+
-+ pmd += pmd_index(vaddr);
-+ set_pmd(pmd, __pmd(paddr | __PAGE_KERNEL_LARGE_EXEC));
-+
-+ restore_pgd_addr = temp_level4_pgt + pgd_index(vaddr);
-+ return 0;
-+}
-+
- static void *alloc_pgt_page(void *context)
- {
- return (void *)get_safe_page(GFP_ATOMIC);
-@@ -59,10 +103,19 @@ static int set_up_temporary_mappings(voi
- if (!temp_level4_pgt)
- return -ENOMEM;
-
-- /* It is safe to reuse the original kernel mapping */
-+ /* Re-use the original kernel text mapping for now */
- set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
- init_level4_pgt[pgd_index(__START_KERNEL_map)]);
-
-+ /*
-+ * Prepare a temporary mapping for the kernel text, but don't use it
-+ * just yet, we'll switch over to it later. It only has to cover one
-+ * piece of code: the page containing the image kernel's entry point.
-+ */
-+ result = prepare_temporary_text_mapping();
-+ if (result)
-+ return result;
-+
- /* Set up the direct mapping from scratch */
- for (i = 0; i < nr_pfn_mapped; i++) {
- mstart = pfn_mapped[i].start << PAGE_SHIFT;
-@@ -108,12 +161,13 @@ int pfn_is_nosave(unsigned long pfn)
- }
-
- struct restore_data_record {
-- unsigned long jump_address;
-+ void *jump_address;
-+ unsigned long jump_address_phys;
- unsigned long cr3;
- unsigned long magic;
- };
-
--#define RESTORE_MAGIC 0x0123456789ABCDEFUL
-+#define RESTORE_MAGIC 0x123456789ABCDEF0UL
-
- /**
- * arch_hibernation_header_save - populate the architecture specific part
-@@ -126,7 +180,8 @@ int arch_hibernation_header_save(void *a
-
- if (max_size < sizeof(struct restore_data_record))
- return -EOVERFLOW;
-- rdr->jump_address = restore_jump_address;
-+ rdr->jump_address = &restore_registers;
-+ rdr->jump_address_phys = __pa_symbol(&restore_registers);
- rdr->cr3 = restore_cr3;
- rdr->magic = RESTORE_MAGIC;
- return 0;
-@@ -142,6 +197,7 @@ int arch_hibernation_header_restore(void
- struct restore_data_record *rdr = addr;
-
- restore_jump_address = rdr->jump_address;
-+ jump_address_phys = rdr->jump_address_phys;
- restore_cr3 = rdr->cr3;
- return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
- }
-Index: linux-pm/arch/x86/power/hibernate_asm_64.S
-===================================================================
---- linux-pm.orig/arch/x86/power/hibernate_asm_64.S
-+++ linux-pm/arch/x86/power/hibernate_asm_64.S
-@@ -44,9 +44,6 @@ ENTRY(swsusp_arch_suspend)
- pushfq
- popq pt_regs_flags(%rax)
-
-- /* save the address of restore_registers */
-- movq $restore_registers, %rax
-- movq %rax, restore_jump_address(%rip)
- /* save cr3 */
- movq %cr3, %rax
- movq %rax, restore_cr3(%rip)
-@@ -72,8 +69,10 @@ ENTRY(restore_image)
- movq %rax, %cr4; # turn PGE back on
-
- /* prepare to jump to the image kernel */
-- movq restore_jump_address(%rip), %rax
- movq restore_cr3(%rip), %rbx
-+ movq restore_jump_address(%rip), %r10
-+ movq restore_pgd(%rip), %r8
-+ movq restore_pgd_addr(%rip), %r9
-
- /* prepare to copy image data to their original locations */
- movq restore_pblist(%rip), %rdx
-@@ -96,20 +95,22 @@ ENTRY(core_restore_code)
- /* progress to the next pbe */
- movq pbe_next(%rdx), %rdx
- jmp .Lloop
-+
- .Ldone:
-+ /* switch over to the temporary kernel text mapping */
-+ movq %r8, (%r9)
-+ /* flush TLB */
-+ movq %rax, %rdx
-+ andq $~(X86_CR4_PGE), %rdx
-+ movq %rdx, %cr4; # turn off PGE
-+ movq %cr3, %rcx; # flush TLB
-+ movq %rcx, %cr3;
-+ movq %rax, %cr4; # turn PGE back on
- /* jump to the restore_registers address from the image header */
-- jmpq *%rax
-- /*
-- * NOTE: This assumes that the boot kernel's text mapping covers the
-- * image kernel's page containing restore_registers and the address of
-- * this page is the same as in the image kernel's text mapping (it
-- * should always be true, because the text mapping is linear, starting
-- * from 0, and is supposed to cover the entire kernel text for every
-- * kernel).
-- *
-- * code below belongs to the image kernel
-- */
-+ jmpq *%r10
-
-+ /* code below belongs to the image kernel */
-+ .align PAGE_SIZE
- ENTRY(restore_registers)
- FRAME_BEGIN
- /* go back to the original page tables */
diff --git a/x86-hibernate-Use-hlt_play_dead-when-resuming-from-hibernation.patch b/x86-hibernate-Use-hlt_play_dead-when-resuming-from-hibernation.patch
new file mode 100644
index 000000000000..0517f9b6eca6
--- /dev/null
+++ b/x86-hibernate-Use-hlt_play_dead-when-resuming-from-hibernation.patch
@@ -0,0 +1,101 @@
+Index: linux-pm/kernel/power/hibernate.c
+===================================================================
+--- linux-pm.orig/kernel/power/hibernate.c
++++ linux-pm/kernel/power/hibernate.c
+@@ -409,6 +409,11 @@ int hibernation_snapshot(int platform_mo
+ goto Close;
+ }
+
++int __weak hibernate_resume_nonboot_cpu_disable(void)
++{
++ return disable_nonboot_cpus();
++}
++
+ /**
+ * resume_target_kernel - Restore system state from a hibernation image.
+ * @platform_mode: Whether or not to use the platform driver.
+@@ -433,7 +438,7 @@ static int resume_target_kernel(bool pla
+ if (error)
+ goto Cleanup;
+
+- error = disable_nonboot_cpus();
++ error = hibernate_resume_nonboot_cpu_disable();
+ if (error)
+ goto Enable_cpus;
+
+Index: linux-pm/kernel/power/power.h
+===================================================================
+--- linux-pm.orig/kernel/power/power.h
++++ linux-pm/kernel/power/power.h
+@@ -38,6 +38,8 @@ static inline char *check_image_kernel(s
+ }
+ #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
+
++extern int hibernate_resume_nonboot_cpu_disable(void);
++
+ /*
+ * Keep some memory free so that I/O operations can succeed without paging
+ * [Might this be more than 4 MB?]
+Index: linux-pm/arch/x86/power/cpu.c
+===================================================================
+--- linux-pm.orig/arch/x86/power/cpu.c
++++ linux-pm/arch/x86/power/cpu.c
+@@ -266,6 +266,27 @@ void notrace restore_processor_state(voi
+ EXPORT_SYMBOL(restore_processor_state);
+ #endif
+
++#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
++bool force_hlt_play_dead __read_mostly;
++
++int hibernate_resume_nonboot_cpu_disable(void)
++{
++ int ret;
++
++ /*
++ * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
++ * during hibernate image restoration, because it is likely that the
++ * monitored address will be actually written to at that time and then
++ * the "dead" CPU may start executing instructions from an image
++ * kernel's page (and that may not be the "play dead" loop any more).
++ */
++ force_hlt_play_dead = true;
++ ret = disable_nonboot_cpus();
++ force_hlt_play_dead = false;
++ return ret;
++}
++#endif
++
+ /*
+ * When bsp_check() is called in hibernate and suspend, cpu hotplug
+ * is disabled already. So it's unnessary to handle race condition between
+Index: linux-pm/arch/x86/kernel/smpboot.c
+===================================================================
+--- linux-pm.orig/arch/x86/kernel/smpboot.c
++++ linux-pm/arch/x86/kernel/smpboot.c
+@@ -1642,6 +1642,9 @@ void native_play_dead(void)
+ play_dead_common();
+ tboot_shutdown(TB_SHUTDOWN_WFS);
+
++ if (force_hlt_play_dead)
++ hlt_play_dead();
++
+ mwait_play_dead(); /* Only returns on failure */
+ if (cpuidle_play_dead())
+ hlt_play_dead();
+Index: linux-pm/arch/x86/include/asm/cpu.h
+===================================================================
+--- linux-pm.orig/arch/x86/include/asm/cpu.h
++++ linux-pm/arch/x86/include/asm/cpu.h
+@@ -26,6 +26,12 @@ struct x86_cpu {
+ };
+
+ #ifdef CONFIG_HOTPLUG_CPU
++#ifdef CONFIG_HIBERNATION
++extern bool force_hlt_play_dead;
++#else
++#define force_hlt_play_dead (false)
++#endif
++
+ extern int arch_register_cpu(int num);
+ extern void arch_unregister_cpu(int);
+ extern void start_cpu0(void);
diff --git a/x86-power-64-Fix-kernel-text-mapping-corruption-during-image-restoration-was-Re-ktime_get_ts64-splat-during-resume_v5.patch b/x86-power-64-Fix-kernel-text-mapping-corruption-during-image-restoration-was-Re-ktime_get_ts64-splat-during-resume_v5.patch
new file mode 100644
index 000000000000..cc1bdcd59ef3
--- /dev/null
+++ b/x86-power-64-Fix-kernel-text-mapping-corruption-during-image-restoration-was-Re-ktime_get_ts64-splat-during-resume_v5.patch
@@ -0,0 +1,265 @@
+Index: linux-pm/arch/x86/power/hibernate_64.c
+===================================================================
+--- linux-pm.orig/arch/x86/power/hibernate_64.c
++++ linux-pm/arch/x86/power/hibernate_64.c
+@@ -19,6 +19,7 @@
+ #include <asm/mtrr.h>
+ #include <asm/sections.h>
+ #include <asm/suspend.h>
++#include <asm/tlbflush.h>
+
+ /* Defined in hibernate_asm_64.S */
+ extern asmlinkage __visible int restore_image(void);
+@@ -28,6 +29,7 @@ extern asmlinkage __visible int restore_
+ * kernel's text (this value is passed in the image header).
+ */
+ unsigned long restore_jump_address __visible;
++unsigned long jump_address_phys;
+
+ /*
+ * Value of the cr3 register from before the hibernation (this value is passed
+@@ -37,7 +39,43 @@ unsigned long restore_cr3 __visible;
+
+ pgd_t *temp_level4_pgt __visible;
+
+-void *relocated_restore_code __visible;
++unsigned long relocated_restore_code __visible;
++
++static int set_up_temporary_text_mapping(void)
++{
++ pmd_t *pmd;
++ pud_t *pud;
++
++ /*
++ * The new mapping only has to cover the page containing the image
++ * kernel's entry point (jump_address_phys), because the switch over to
++ * it is carried out by relocated code running from a page allocated
++ * specifically for this purpose and covered by the identity mapping, so
++ * the temporary kernel text mapping is only needed for the final jump.
++ * Moreover, in that mapping the virtual address of the image kernel's
++ * entry point must be the same as its virtual address in the image
++ * kernel (restore_jump_address), so the image kernel's
++ * restore_registers() code doesn't find itself in a different area of
++ * the virtual address space after switching over to the original page
++ * tables used by the image kernel.
++ */
++ pud = (pud_t *)get_safe_page(GFP_ATOMIC);
++ if (!pud)
++ return -ENOMEM;
++
++ pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
++ if (!pmd)
++ return -ENOMEM;
++
++ set_pmd(pmd + pmd_index(restore_jump_address),
++ __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
++ set_pud(pud + pud_index(restore_jump_address),
++ __pud(__pa(pmd) | _KERNPG_TABLE));
++ set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
++ __pgd(__pa(pud) | _KERNPG_TABLE));
++
++ return 0;
++}
+
+ static void *alloc_pgt_page(void *context)
+ {
+@@ -59,9 +97,10 @@ static int set_up_temporary_mappings(voi
+ if (!temp_level4_pgt)
+ return -ENOMEM;
+
+- /* It is safe to reuse the original kernel mapping */
+- set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
+- init_level4_pgt[pgd_index(__START_KERNEL_map)]);
++ /* Prepare a temporary mapping for the kernel text */
++ result = set_up_temporary_text_mapping();
++ if (result)
++ return result;
+
+ /* Set up the direct mapping from scratch */
+ for (i = 0; i < nr_pfn_mapped; i++) {
+@@ -78,19 +117,50 @@ static int set_up_temporary_mappings(voi
+ return 0;
+ }
+
++static int relocate_restore_code(void)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++
++ relocated_restore_code = get_safe_page(GFP_ATOMIC);
++ if (!relocated_restore_code)
++ return -ENOMEM;
++
++ memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
++
++ /* Make the page containing the relocated code executable */
++ pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
++ pud = pud_offset(pgd, relocated_restore_code);
++ if (pud_large(*pud)) {
++ set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
++ } else {
++ pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
++
++ if (pmd_large(*pmd)) {
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
++ } else {
++ pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
++
++ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
++ }
++ }
++ __flush_tlb_all();
++
++ return 0;
++}
++
+ int swsusp_arch_resume(void)
+ {
+ int error;
+
+ /* We have got enough memory and from now on we cannot recover */
+- if ((error = set_up_temporary_mappings()))
++ error = set_up_temporary_mappings();
++ if (error)
+ return error;
+
+- relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
+- if (!relocated_restore_code)
+- return -ENOMEM;
+- memcpy(relocated_restore_code, &core_restore_code,
+- &restore_registers - &core_restore_code);
++ error = relocate_restore_code();
++ if (error)
++ return error;
+
+ restore_image();
+ return 0;
+@@ -109,11 +179,12 @@ int pfn_is_nosave(unsigned long pfn)
+
+ struct restore_data_record {
+ unsigned long jump_address;
++ unsigned long jump_address_phys;
+ unsigned long cr3;
+ unsigned long magic;
+ };
+
+-#define RESTORE_MAGIC 0x0123456789ABCDEFUL
++#define RESTORE_MAGIC 0x123456789ABCDEF0UL
+
+ /**
+ * arch_hibernation_header_save - populate the architecture specific part
+@@ -126,7 +197,8 @@ int arch_hibernation_header_save(void *a
+
+ if (max_size < sizeof(struct restore_data_record))
+ return -EOVERFLOW;
+- rdr->jump_address = restore_jump_address;
++ rdr->jump_address = (unsigned long)&restore_registers;
++ rdr->jump_address_phys = __pa_symbol(&restore_registers);
+ rdr->cr3 = restore_cr3;
+ rdr->magic = RESTORE_MAGIC;
+ return 0;
+@@ -142,6 +214,7 @@ int arch_hibernation_header_restore(void
+ struct restore_data_record *rdr = addr;
+
+ restore_jump_address = rdr->jump_address;
++ jump_address_phys = rdr->jump_address_phys;
+ restore_cr3 = rdr->cr3;
+ return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
+ }
+Index: linux-pm/arch/x86/power/hibernate_asm_64.S
+===================================================================
+--- linux-pm.orig/arch/x86/power/hibernate_asm_64.S
++++ linux-pm/arch/x86/power/hibernate_asm_64.S
+@@ -44,9 +44,6 @@ ENTRY(swsusp_arch_suspend)
+ pushfq
+ popq pt_regs_flags(%rax)
+
+- /* save the address of restore_registers */
+- movq $restore_registers, %rax
+- movq %rax, restore_jump_address(%rip)
+ /* save cr3 */
+ movq %cr3, %rax
+ movq %rax, restore_cr3(%rip)
+@@ -57,31 +54,34 @@ ENTRY(swsusp_arch_suspend)
+ ENDPROC(swsusp_arch_suspend)
+
+ ENTRY(restore_image)
+- /* switch to temporary page tables */
+- movq $__PAGE_OFFSET, %rdx
+- movq temp_level4_pgt(%rip), %rax
+- subq %rdx, %rax
+- movq %rax, %cr3
+- /* Flush TLB */
+- movq mmu_cr4_features(%rip), %rax
+- movq %rax, %rdx
+- andq $~(X86_CR4_PGE), %rdx
+- movq %rdx, %cr4; # turn off PGE
+- movq %cr3, %rcx; # flush TLB
+- movq %rcx, %cr3;
+- movq %rax, %cr4; # turn PGE back on
+-
+ /* prepare to jump to the image kernel */
+- movq restore_jump_address(%rip), %rax
+- movq restore_cr3(%rip), %rbx
++ movq restore_jump_address(%rip), %r8
++ movq restore_cr3(%rip), %r9
++
++ /* prepare to switch to temporary page tables */
++ movq temp_level4_pgt(%rip), %rax
++ movq mmu_cr4_features(%rip), %rbx
+
+ /* prepare to copy image data to their original locations */
+ movq restore_pblist(%rip), %rdx
++
++ /* jump to relocated restore code */
+ movq relocated_restore_code(%rip), %rcx
+ jmpq *%rcx
+
+ /* code below has been relocated to a safe page */
+ ENTRY(core_restore_code)
++ /* switch to temporary page tables */
++ movq $__PAGE_OFFSET, %rcx
++ subq %rcx, %rax
++ movq %rax, %cr3
++ /* flush TLB */
++ movq %rbx, %rcx
++ andq $~(X86_CR4_PGE), %rcx
++ movq %rcx, %cr4; # turn off PGE
++ movq %cr3, %rcx; # flush TLB
++ movq %rcx, %cr3;
++ movq %rbx, %cr4; # turn PGE back on
+ .Lloop:
+ testq %rdx, %rdx
+ jz .Ldone
+@@ -96,24 +96,17 @@ ENTRY(core_restore_code)
+ /* progress to the next pbe */
+ movq pbe_next(%rdx), %rdx
+ jmp .Lloop
++
+ .Ldone:
+ /* jump to the restore_registers address from the image header */
+- jmpq *%rax
+- /*
+- * NOTE: This assumes that the boot kernel's text mapping covers the
+- * image kernel's page containing restore_registers and the address of
+- * this page is the same as in the image kernel's text mapping (it
+- * should always be true, because the text mapping is linear, starting
+- * from 0, and is supposed to cover the entire kernel text for every
+- * kernel).
+- *
+- * code below belongs to the image kernel
+- */
++ jmpq *%r8
+
++ /* code below belongs to the image kernel */
++ .align PAGE_SIZE
+ ENTRY(restore_registers)
+ FRAME_BEGIN
+ /* go back to the original page tables */
+- movq %rbx, %cr3
++ movq %r9, %cr3
+
+ /* Flush TLB, including "global" things (vmalloc) */
+ movq mmu_cr4_features(%rip), %rax
+
+