Diffstat (limited to '0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch')
-rw-r--r-- | 0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch | 21
1 file changed, 11 insertions, 10 deletions
diff --git a/0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch b/0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch
index e8b0631e7c49..aebc50bcdeb6 100644
--- a/0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch
+++ b/0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch
@@ -1,7 +1,7 @@
-From 8e7efbd6ea3242525db8205614a6e7e87ab89415 Mon Sep 17 00:00:00 2001
+From 836478d4e8f9a6b1ce067e597f29e59eb1422423 Mon Sep 17 00:00:00 2001
 From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Wed, 9 Mar 2022 14:37:47 -0800
-Subject: [PATCH 05/30] x86/sgx: Support loading enclave page without VMA
+Date: Wed, 13 Apr 2022 14:10:05 -0700
+Subject: [PATCH 05/31] x86/sgx: Support loading enclave page without VMA
  permissions check
 
 sgx_encl_load_page() is used to find and load an enclave page into
@@ -19,9 +19,10 @@ A new call, sgx_encl_load_page_in_vma(), behaves exactly like the
 current sgx_encl_load_page() that takes VMA permissions into account,
 while sgx_encl_load_page() just loads an enclave page into EPC.
 
-VMA, PTE, and EPCM permissions would continue to dictate whether
+VMA, PTE, and EPCM permissions continue to dictate whether
 the pages can be accessed from within an enclave.
 
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
 Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
 ---
  arch/x86/kernel/cpu/sgx/encl.c | 57 ++++++++++++++++++++++------------
@@ -29,10 +30,10 @@ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
  2 files changed, 40 insertions(+), 19 deletions(-)
 
 diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index 7c63a1911fae..05ae1168391c 100644
+index 001808e3901c..b45fcecea4bd 100644
 --- a/arch/x86/kernel/cpu/sgx/encl.c
 +++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -131,25 +131,10 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
+@@ -90,25 +90,10 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
  	return epc_page;
  }
 
@@ -60,7 +61,7 @@ index 7c63a1911fae..05ae1168391c 100644
 
 	/* Entry successfully located. */
 	if (entry->epc_page) {
-@@ -175,6 +160,40 @@ static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
+@@ -134,6 +119,40 @@ static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
  	return entry;
  }
 
@@ -101,7 +102,7 @@ index 7c63a1911fae..05ae1168391c 100644
 static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
 {
 	unsigned long addr = (unsigned long)vmf->address;
-@@ -196,7 +215,7 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
+@@ -155,7 +174,7 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
 
 	mutex_lock(&encl->lock);
 
@@ -110,7 +111,7 @@ index 7c63a1911fae..05ae1168391c 100644
 
 	if (IS_ERR(entry)) {
 		mutex_unlock(&encl->lock);
-@@ -344,7 +363,7 @@ static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
+@@ -303,7 +322,7 @@ static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
 
 	for ( ; ; ) {
 		mutex_lock(&encl->lock);
@@ -132,5 +133,5 @@ index fec43ca65065..6b34efba1602 100644
 
 #endif /* _X86_ENCL_H */
 --
-2.35.1
+2.35.2
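
The commit message carried in this patch boils down to a small interface split: the VMA permission filtering that the page-fault path needs stays in a wrapper, sgx_encl_load_page_in_vma(), while sgx_encl_load_page() becomes a bare loader usable by code paths that have no VMA at hand. The C program below is a self-contained, user-space model of that split, not the kernel code: the two function names follow the patch, but the struct layouts, the VM_* constants, the single-page enclave, and the NULL-on-refusal error handling are simplified assumptions for illustration only.

/*
 * Simplified model of the refactor described above: the permission
 * check lives only in the *_in_vma() wrapper, the core loader does
 * no filtering. All types and constants here are illustrative.
 */
#include <stdio.h>

#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

struct sgx_encl_page {
	unsigned long vm_max_prot_bits;	/* most access the page may ever get */
	int resident;			/* stand-in for entry->epc_page */
};

struct sgx_encl {
	struct sgx_encl_page page;	/* one page keeps the model small */
};

/* Core loader: locate the page and make sure it is resident in EPC. */
static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
						unsigned long addr)
{
	struct sgx_encl_page *entry = &encl->page;

	(void)addr;		/* a real lookup would use the address */
	entry->resident = 1;	/* a real loader may have to page it back in */
	return entry;
}

/*
 * VMA-aware wrapper: refuse the access when the VMA wants more than the
 * page can ever be granted, otherwise fall through to the core loader.
 */
static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
							unsigned long addr,
							unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);

	if (vm_prot_bits & ~encl->page.vm_max_prot_bits)
		return NULL;	/* the kernel would return ERR_PTR(-EFAULT) */

	return sgx_encl_load_page(encl, addr);
}

int main(void)
{
	struct sgx_encl encl = { .page = { .vm_max_prot_bits = VM_READ } };

	/* Fault-style path: VMA permissions are honoured, write is refused. */
	printf("in_vma(write): %s\n",
	       sgx_encl_load_page_in_vma(&encl, 0x1000, VM_WRITE) ? "ok" : "refused");

	/* Permission-agnostic path: the page is simply loaded. */
	printf("plain load:    %s\n",
	       sgx_encl_load_page(&encl, 0x1000) ? "ok" : "refused");
	return 0;
}

The split mirrors the behaviour the commit message describes: existing fault handling keeps its VMA-driven checks through the wrapper, while other callers can load a page into EPC without supplying VMA permissions at all.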