author     Jarkko Sakkinen  2022-04-05 07:55:53 +0300
committer  Jarkko Sakkinen  2022-04-05 08:12:22 +0300
commit     9e595612af76514fe6b9fecdc384a33473c7fe08 (patch)
tree       2d2716e70c266aa4e0c4ba619318d69d258955c1
parent     71077f1ec07d0cdb3bf641af1263865b04eaafa3 (diff)
download   aur-9e595612af76514fe6b9fecdc384a33473c7fe08.tar.gz
bump sgx2 series v3
Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@iki.fi>
-rw-r--r--  0001-x86-sgx-Add-short-descriptions-to-ENCLS-wrappers.patch | 6
-rw-r--r--  0002-x86-sgx-Add-wrapper-for-SGX2-EMODPR-function.patch | 11
-rw-r--r--  0003-x86-sgx-Add-wrapper-for-SGX2-EMODT-function.patch | 6
-rw-r--r--  0004-x86-sgx-Add-wrapper-for-SGX2-EAUG-function.patch | 6
-rw-r--r--  0005-Documentation-x86-Document-SGX-permission-details.patch | 63
-rw-r--r--  0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch | 136
-rw-r--r--  0006-x86-sgx-Export-sgx_encl_ewb_cpumask.patch (renamed from 0009-x86-sgx-Export-sgx_encl_ewb_cpumask.patch) | 16
-rw-r--r--  0006-x86-sgx-Support-VMA-permissions-more-relaxed-than-en.patch | 223
-rw-r--r--  0007-x86-sgx-Add-pfn_mkwrite-handler-for-present-PTEs.patch | 161
-rw-r--r--  0007-x86-sgx-Rename-sgx_encl_ewb_cpumask-as-sgx_encl_cpum.patch (renamed from 0010-x86-sgx-Rename-sgx_encl_ewb_cpumask-as-sgx_encl_cpum.patch) | 20
-rw-r--r--  0008-x86-sgx-Move-PTE-zap-code-to-new-sgx_zap_enclave_pte.patch (renamed from 0011-x86-sgx-Move-PTE-zap-code-to-new-sgx_zap_enclave_pte.patch) | 18
-rw-r--r--  0008-x86-sgx-x86-sgx-Add-sgx_encl_page-vm_run_prot_bits-f.patch | 168
-rw-r--r--  0009-x86-sgx-Make-sgx_ipi_cb-available-internally.patch (renamed from 0012-x86-sgx-Make-sgx_ipi_cb-available-internally.patch) | 8
-rw-r--r--  0010-x86-sgx-Create-utility-to-validate-user-provided-off.patch (renamed from 0013-x86-sgx-Create-utility-to-validate-user-provided-off.patch) | 12
-rw-r--r--  0011-x86-sgx-Keep-record-of-SGX-page-type.patch (renamed from 0014-x86-sgx-Keep-record-of-SGX-page-type.patch) | 33
-rw-r--r--  0012-x86-sgx-Export-sgx_encl_-grow-shrink.patch | 52
-rw-r--r--  0013-x86-sgx-Export-sgx_encl_page_alloc.patch | 120
-rw-r--r--  0014-x86-sgx-Support-restricting-of-enclave-page-permissi.patch (renamed from 0016-x86-sgx-Support-restricting-of-enclave-page-permissi.patch) | 244
-rw-r--r--  0015-x86-sgx-Support-adding-of-pages-to-an-initialized-en.patch (renamed from 0019-x86-sgx-Support-adding-of-pages-to-an-initialized-en.patch) | 100
-rw-r--r--  0015-x86-sgx-Support-relaxing-of-enclave-page-permissions.patch | 312
-rw-r--r--  0016-x86-sgx-Tighten-accessible-memory-range-after-enclav.patch (renamed from 0020-x86-sgx-Tighten-accessible-memory-range-after-enclav.patch) | 10
-rw-r--r--  0017-x86-sgx-Support-modifying-SGX-page-type.patch (renamed from 0022-x86-sgx-Support-modifying-SGX-page-type.patch) | 61
-rw-r--r--  0018-x86-sgx-Support-complete-page-removal.patch (renamed from 0023-x86-sgx-Support-complete-page-removal.patch) | 26
-rw-r--r--  0019-x86-sgx-Free-up-EPC-pages-directly-to-support-large-.patch (renamed from 0031-x86-sgx-Free-up-EPC-pages-directly-to-support-large-.patch) | 18
-rw-r--r--  0024-Documentation-x86-Introduce-enclave-runtime-manageme.patch | 47
-rw-r--r--  0033-x86-sgx-Enable-PROT_EXEC-for-EAUG-d-pages.patch | 46
-rw-r--r--  0034-Revert-x86-sgx-x86-sgx-Add-sgx_encl_page-vm_run_prot.patch | 206
-rw-r--r--  0035-x86-sgx-Free-backing-memory-after-faulting-the-encla.patch | 170
-rw-r--r--  PKGBUILD | 35
29 files changed, 585 insertions, 1749 deletions
diff --git a/0001-x86-sgx-Add-short-descriptions-to-ENCLS-wrappers.patch b/0001-x86-sgx-Add-short-descriptions-to-ENCLS-wrappers.patch
index fc5947429922..b970b6966932 100644
--- a/0001-x86-sgx-Add-short-descriptions-to-ENCLS-wrappers.patch
+++ b/0001-x86-sgx-Add-short-descriptions-to-ENCLS-wrappers.patch
@@ -1,7 +1,7 @@
-From fd368818463168ceaa7904dcb0ac4072537c8fcc Mon Sep 17 00:00:00 2001
+From 39ca48af7c889fad9e0fcdea0003bac2ba074cf4 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:23 -0800
-Subject: [PATCH 01/34] x86/sgx: Add short descriptions to ENCLS wrappers
+Date: Wed, 22 Sep 2021 09:35:36 -0700
+Subject: [PATCH 01/30] x86/sgx: Add short descriptions to ENCLS wrappers
The SGX ENCLS instruction uses EAX to specify an SGX function and
may require additional registers, depending on the SGX function.
diff --git a/0002-x86-sgx-Add-wrapper-for-SGX2-EMODPR-function.patch b/0002-x86-sgx-Add-wrapper-for-SGX2-EMODPR-function.patch
index ccf5148a7f3d..e4c66a908fd8 100644
--- a/0002-x86-sgx-Add-wrapper-for-SGX2-EMODPR-function.patch
+++ b/0002-x86-sgx-Add-wrapper-for-SGX2-EMODPR-function.patch
@@ -1,7 +1,7 @@
-From 783686dae546c6d1f273a47cf36335bc7fdbaa89 Mon Sep 17 00:00:00 2001
+From 5c18c202d19f1e20b7a883a14ac428c54697f47c Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:24 -0800
-Subject: [PATCH 02/34] x86/sgx: Add wrapper for SGX2 EMODPR function
+Date: Mon, 17 May 2021 16:31:35 -0700
+Subject: [PATCH 02/30] x86/sgx: Add wrapper for SGX2 EMODPR function
Add a wrapper for the EMODPR ENCLS leaf function used to
restrict enclave page permissions as maintained in the
@@ -9,8 +9,9 @@ SGX hardware's Enclave Page Cache Map (EPCM).
EMODPR:
1) Updates the EPCM permissions of an enclave page by treating
- the new permissions as a mask - supplying a value that relaxes
- EPCM permissions has no effect.
+ the new permissions as a mask. Supplying a value that attempts
+ to relax EPCM permissions has no effect on EPCM permissions
+ (PR bit, see below, is changed).
2) Sets the PR bit in the EPCM entry of the enclave page to
indicate that permission restriction is in progress. The bit
is reset by the enclave by invoking ENCLU leaf function
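
A minimal sketch of the wrapper this patch adds, following the existing ENCLS wrapper pattern in arch/x86/kernel/cpu/sgx/encls.h; the EMODPR leaf constant and the __encls_ret_2() helper are assumed from that header, and the exact hunk is not shown in this diff:

/* Restrict the EPCM permissions of an already-added enclave page. */
static inline int __emodpr(struct sgx_secinfo *secinfo, void *addr)
{
	return __encls_ret_2(EMODPR, secinfo, addr);
}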
diff --git a/0003-x86-sgx-Add-wrapper-for-SGX2-EMODT-function.patch b/0003-x86-sgx-Add-wrapper-for-SGX2-EMODT-function.patch
index 3c1cd27bf41d..3fe5af52a3b4 100644
--- a/0003-x86-sgx-Add-wrapper-for-SGX2-EMODT-function.patch
+++ b/0003-x86-sgx-Add-wrapper-for-SGX2-EMODT-function.patch
@@ -1,7 +1,7 @@
-From df48fa17c819d200e92b6862d39dc15f32c58e4e Mon Sep 17 00:00:00 2001
+From 9c86a3f70dadbcd0b31a624041ab25c658cdcb95 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:25 -0800
-Subject: [PATCH 03/34] x86/sgx: Add wrapper for SGX2 EMODT function
+Date: Fri, 7 Jan 2022 11:27:29 -0800
+Subject: [PATCH 03/30] x86/sgx: Add wrapper for SGX2 EMODT function
Add a wrapper for the EMODT ENCLS leaf function used to
change the type of an enclave page as maintained in the
diff --git a/0004-x86-sgx-Add-wrapper-for-SGX2-EAUG-function.patch b/0004-x86-sgx-Add-wrapper-for-SGX2-EAUG-function.patch
index a44344dd65e5..c036e9b8a4ac 100644
--- a/0004-x86-sgx-Add-wrapper-for-SGX2-EAUG-function.patch
+++ b/0004-x86-sgx-Add-wrapper-for-SGX2-EAUG-function.patch
@@ -1,7 +1,7 @@
-From dd3f8f27edf1638ae4eb10d40513ca6f290a826c Mon Sep 17 00:00:00 2001
+From d6d1ac0735a6e105e37ca3d010d59ce070566ea6 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:26 -0800
-Subject: [PATCH 04/34] x86/sgx: Add wrapper for SGX2 EAUG function
+Date: Fri, 7 Jan 2022 11:28:42 -0800
+Subject: [PATCH 04/30] x86/sgx: Add wrapper for SGX2 EAUG function
Add a wrapper for the EAUG ENCLS leaf function used to
add a page to an initialized enclave.
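
Patches 03 and 04 add analogous wrappers for EMODT and EAUG. A hedged sketch under the same assumptions (EMODT reports an ENCLS error code via __encls_ret_2(), while EAUG uses the non-reporting __encls_2() form):

/* Change the EPCM page type of an enclave page. */
static inline int __emodt(struct sgx_secinfo *secinfo, void *addr)
{
	return __encls_ret_2(EMODT, secinfo, addr);
}

/* Zero a page of EPC memory and add it to an initialized enclave. */
static inline int __eaug(struct sgx_pageinfo *pginfo, void *addr)
{
	return __encls_2(EAUG, pginfo, addr);
}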
diff --git a/0005-Documentation-x86-Document-SGX-permission-details.patch b/0005-Documentation-x86-Document-SGX-permission-details.patch
deleted file mode 100644
index 1587e9be19b4..000000000000
--- a/0005-Documentation-x86-Document-SGX-permission-details.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-From 6580ecec3c24b798a94858e32820e047dd14317c Mon Sep 17 00:00:00 2001
-From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:27 -0800
-Subject: [PATCH 05/34] Documentation/x86: Document SGX permission details
-
-Provide summary of the various permissions involved in
-managing access to enclave pages. This summary documents
-the foundation for additions related to runtime managing of
-enclave page permissions that is made possible with SGX2.
-
-Suggested-by: Andy Lutomirski <luto@kernel.org>
-Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
----
- Documentation/x86/sgx.rst | 28 +++++++++++++++++++++++-----
- 1 file changed, 23 insertions(+), 5 deletions(-)
-
-diff --git a/Documentation/x86/sgx.rst b/Documentation/x86/sgx.rst
-index 265568a9292c..89ff924b1480 100644
---- a/Documentation/x86/sgx.rst
-+++ b/Documentation/x86/sgx.rst
-@@ -71,16 +71,34 @@ The processor tracks EPC pages in a hardware metadata structure called the
- which describes the owning enclave, access rights and page type among the other
- things.
-
--EPCM permissions are separate from the normal page tables. This prevents the
--kernel from, for instance, allowing writes to data which an enclave wishes to
--remain read-only. EPCM permissions may only impose additional restrictions on
--top of normal x86 page permissions.
--
- For all intents and purposes, the SGX architecture allows the processor to
- invalidate all EPCM entries at will. This requires that software be prepared to
- handle an EPCM fault at any time. In practice, this can happen on events like
- power transitions when the ephemeral key that encrypts enclave memory is lost.
-
-+Details about enclave page permissions
-+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+
-+EPCM permissions are separate from the normal page tables. This prevents the
-+kernel from, for instance, allowing writes to data which an enclave wishes
-+to remain read-only.
-+
-+Three permission masks are relevant to SGX:
-+
-+* EPCM permissions.
-+* Page Table Entry (PTE) permissions.
-+* Virtual Memory Area (VMA) permissions.
-+
-+An enclave is only able to access an enclave page if all three permission
-+masks enable it to do so.
-+
-+The relationships between the different permission masks are:
-+
-+* An SGX VMA can only be created if its permissions are the same or weaker
-+ than the EPCM permissions.
-+* PTEs are installed to match the EPCM permissions, but not be more
-+ relaxed than the VMA permissions.
-+
- Application interface
- =====================
-
---
-2.35.1
-
diff --git a/0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch b/0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch
new file mode 100644
index 000000000000..e8b0631e7c49
--- /dev/null
+++ b/0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch
@@ -0,0 +1,136 @@
+From 8e7efbd6ea3242525db8205614a6e7e87ab89415 Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Wed, 9 Mar 2022 14:37:47 -0800
+Subject: [PATCH 05/30] x86/sgx: Support loading enclave page without VMA
+ permissions check
+
+sgx_encl_load_page() is used to find and load an enclave page into
+enclave (EPC) memory, potentially loading it from the backing storage.
+Both usages of sgx_encl_load_page() are during an access to the
+enclave page from a VMA and thus the permissions of the VMA are
+considered before the enclave page is loaded.
+
+SGX2 functions operating on enclave pages belonging to an initialized
+enclave requiring the page to be in EPC. It is thus required to
+support loading enclave pages into the EPC independent from a VMA.
+
+Split the current sgx_encl_load_page() to support the two usages:
+A new call, sgx_encl_load_page_in_vma(), behaves exactly like the
+current sgx_encl_load_page() that takes VMA permissions into account,
+while sgx_encl_load_page() just loads an enclave page into EPC.
+
+VMA, PTE, and EPCM permissions would continue to dictate whether
+the pages can be accessed from within an enclave.
+
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+---
+ arch/x86/kernel/cpu/sgx/encl.c | 57 ++++++++++++++++++++++------------
+ arch/x86/kernel/cpu/sgx/encl.h | 2 ++
+ 2 files changed, 40 insertions(+), 19 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
+index 7c63a1911fae..05ae1168391c 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -131,25 +131,10 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
+ return epc_page;
+ }
+
+-static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
+- unsigned long addr,
+- unsigned long vm_flags)
++static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
++ struct sgx_encl_page *entry)
+ {
+- unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+ struct sgx_epc_page *epc_page;
+- struct sgx_encl_page *entry;
+-
+- entry = xa_load(&encl->page_array, PFN_DOWN(addr));
+- if (!entry)
+- return ERR_PTR(-EFAULT);
+-
+- /*
+- * Verify that the faulted page has equal or higher build time
+- * permissions than the VMA permissions (i.e. the subset of {VM_READ,
+- * VM_WRITE, VM_EXECUTE} in vma->vm_flags).
+- */
+- if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
+- return ERR_PTR(-EFAULT);
+
+ /* Entry successfully located. */
+ if (entry->epc_page) {
+@@ -175,6 +160,40 @@ static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
+ return entry;
+ }
+
++static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
++ unsigned long addr,
++ unsigned long vm_flags)
++{
++ unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
++ struct sgx_encl_page *entry;
++
++ entry = xa_load(&encl->page_array, PFN_DOWN(addr));
++ if (!entry)
++ return ERR_PTR(-EFAULT);
++
++ /*
++ * Verify that the page has equal or higher build time
++ * permissions than the VMA permissions (i.e. the subset of {VM_READ,
++ * VM_WRITE, VM_EXECUTE} in vma->vm_flags).
++ */
++ if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
++ return ERR_PTR(-EFAULT);
++
++ return __sgx_encl_load_page(encl, entry);
++}
++
++struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
++ unsigned long addr)
++{
++ struct sgx_encl_page *entry;
++
++ entry = xa_load(&encl->page_array, PFN_DOWN(addr));
++ if (!entry)
++ return ERR_PTR(-EFAULT);
++
++ return __sgx_encl_load_page(encl, entry);
++}
++
+ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
+ {
+ unsigned long addr = (unsigned long)vmf->address;
+@@ -196,7 +215,7 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
+
+ mutex_lock(&encl->lock);
+
+- entry = sgx_encl_load_page(encl, addr, vma->vm_flags);
++ entry = sgx_encl_load_page_in_vma(encl, addr, vma->vm_flags);
+ if (IS_ERR(entry)) {
+ mutex_unlock(&encl->lock);
+
+@@ -344,7 +363,7 @@ static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
+ for ( ; ; ) {
+ mutex_lock(&encl->lock);
+
+- entry = sgx_encl_load_page(encl, addr, vm_flags);
++ entry = sgx_encl_load_page_in_vma(encl, addr, vm_flags);
+ if (PTR_ERR(entry) != -EBUSY)
+ break;
+
+diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
+index fec43ca65065..6b34efba1602 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.h
++++ b/arch/x86/kernel/cpu/sgx/encl.h
+@@ -116,5 +116,7 @@ unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
+ void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
+ bool sgx_va_page_full(struct sgx_va_page *va_page);
+ void sgx_encl_free_epc_page(struct sgx_epc_page *page);
++struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
++ unsigned long addr);
+
+ #endif /* _X86_ENCL_H */
+--
+2.35.1
+
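
A hedged usage sketch of the newly exported sgx_encl_load_page(): the helper name below is hypothetical and only illustrates how an SGX2 path could bring a page into the EPC without any VMA permission check, retrying when the page is being reclaimed:

/* Hypothetical helper: make sure an enclave page is resident in the EPC. */
static int sgx_page_to_epc(struct sgx_encl *encl, unsigned long addr)
{
	struct sgx_encl_page *entry;

	mutex_lock(&encl->lock);
	entry = sgx_encl_load_page(encl, addr);	/* no VMA permission check */
	mutex_unlock(&encl->lock);

	if (!IS_ERR(entry))
		return 0;

	/* Page is being reclaimed; let the caller retry. */
	return PTR_ERR(entry) == -EBUSY ? -EAGAIN : PTR_ERR(entry);
}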
diff --git a/0009-x86-sgx-Export-sgx_encl_ewb_cpumask.patch b/0006-x86-sgx-Export-sgx_encl_ewb_cpumask.patch
index 308deffd42ce..2f5c2faf5354 100644
--- a/0009-x86-sgx-Export-sgx_encl_ewb_cpumask.patch
+++ b/0006-x86-sgx-Export-sgx_encl_ewb_cpumask.patch
@@ -1,7 +1,7 @@
-From 72ff8cf57132e843e3dc95c3f49696c112f9a1b0 Mon Sep 17 00:00:00 2001
+From dfb13ff65827a0f229ee0401ea194cd47d17682e Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:31 -0800
-Subject: [PATCH 09/34] x86/sgx: Export sgx_encl_ewb_cpumask()
+Date: Thu, 3 Jun 2021 14:58:01 -0700
+Subject: [PATCH 06/30] x86/sgx: Export sgx_encl_ewb_cpumask()
Using sgx_encl_ewb_cpumask() to learn which CPUs might have executed
an enclave is useful to ensure that TLBs are cleared when changes are
@@ -30,10 +30,10 @@ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
3 files changed, 68 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index 85429db8c8b5..8cb99fc542a6 100644
+index 05ae1168391c..c6525eba74e8 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -636,6 +636,73 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
+@@ -613,6 +613,73 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
return 0;
}
@@ -108,10 +108,10 @@ index 85429db8c8b5..8cb99fc542a6 100644
pgoff_t index)
{
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
-index dc262d843411..44431da21757 100644
+index 6b34efba1602..d2acb4debde5 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
-@@ -106,6 +106,7 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
+@@ -105,6 +105,7 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
void sgx_encl_release(struct kref *ref);
int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
@@ -120,7 +120,7 @@ index dc262d843411..44431da21757 100644
struct sgx_backing *backing);
void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
-index 4b41efc9e367..d481e8b0e7bc 100644
+index 8e4bc6453d26..2de85f459492 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -203,35 +203,6 @@ static void sgx_ipi_cb(void *info)
diff --git a/0006-x86-sgx-Support-VMA-permissions-more-relaxed-than-en.patch b/0006-x86-sgx-Support-VMA-permissions-more-relaxed-than-en.patch
deleted file mode 100644
index defee425b1d2..000000000000
--- a/0006-x86-sgx-Support-VMA-permissions-more-relaxed-than-en.patch
+++ /dev/null
@@ -1,223 +0,0 @@
-From 721f971c8c55d4b3eff23466c48235f121e038fe Mon Sep 17 00:00:00 2001
-From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:28 -0800
-Subject: [PATCH 06/34] x86/sgx: Support VMA permissions more relaxed than
- enclave permissions
-
-=== Summary ===
-
-An SGX VMA can only be created if its permissions are the same or
-weaker than the Enclave Page Cache Map (EPCM) permissions. After VMA
-creation this same rule is again enforced by the page fault handler:
-faulted enclave pages are required to have equal or more relaxed
-EPCM permissions than the VMA permissions.
-
-On SGX1 systems the additional enforcement in the page fault handler
-is redundant and on SGX2 systems it incorrectly prevents access.
-On SGX1 systems it is unnecessary to repeat the enforcement of the
-permission rule. The rule used during original VMA creation will
-ensure that any access attempt will use correct permissions.
-With SGX2 the EPCM permissions of a page can change after VMA
-creation resulting in the VMA permissions potentially being more
-relaxed than the EPCM permissions and the page fault handler
-incorrectly blocking valid access attempts.
-
-Enable the VMA's pages to remain accessible while ensuring that
-the PTEs are installed to match the EPCM permissions but not be
-more relaxed than the VMA permissions.
-
-=== Full Changelog ===
-
-An SGX enclave is an area of memory where parts of an application
-can reside. First an enclave is created and loaded (from
-non-enclave memory) with the code and data of an application,
-then user space can map (mmap()) the enclave memory to
-be able to enter the enclave at its defined entry points for
-execution within it.
-
-The hardware maintains a secure structure, the Enclave Page Cache Map
-(EPCM), that tracks the contents of the enclave. Of interest here is
-its tracking of the enclave page permissions. When a page is loaded
-into the enclave its permissions are specified and recorded in the
-EPCM. In parallel the kernel maintains permissions within the
-page table entries (PTEs) and the rule is that PTE permissions
-are not allowed to be more relaxed than the EPCM permissions.
-
-A new mapping (mmap()) of enclave memory can only succeed if the
-mapping has the same or weaker permissions than the permissions that
-were vetted during enclave creation. This is enforced by
-sgx_encl_may_map() that is called on the mmap() as well as mprotect()
-paths. This rule remains.
-
-One feature of SGX2 is to support the modification of EPCM permissions
-after enclave initialization. Enclave pages may thus already be part
-of a VMA at the time their EPCM permissions are changed resulting
-in the VMA's permissions potentially being more relaxed than the EPCM
-permissions.
-
-Allow permissions of existing VMAs to be more relaxed than EPCM
-permissions in preparation for dynamic EPCM permission changes
-made possible in SGX2. New VMAs that attempt to have more relaxed
-permissions than EPCM permissions continue to be unsupported.
-
-Reasons why permissions of existing VMAs are allowed to be more relaxed
-than EPCM permissions instead of dynamically changing VMA permissions
-when EPCM permissions change are:
-1) Changing VMA permissions involve splitting VMAs which is an
- operation that can fail. Additionally changing EPCM permissions of
- a range of pages could also fail on any of the pages involved.
- Handling these error cases causes problems. For example, if an
- EPCM permission change fails and the VMA has already been split
- then it is not possible to undo the VMA split nor possible to
- undo the EPCM permission changes that did succeed before the
- failure.
-2) The kernel has little insight into the user space where EPCM
- permissions are controlled from. For example, a RW page may
- be made RO just before it is made RX and splitting the VMAs
- while the VMAs may change soon is unnecessary.
-
-Remove the extra permission check called on a page fault
-(vm_operations_struct->fault) or during debugging
-(vm_operations_struct->access) when loading the enclave page from swap
-that ensures that the VMA permissions are not more relaxed than the
-EPCM permissions. Since a VMA could only exist if it passed the
-original permission checks during mmap() and a VMA may indeed
-have more relaxed permissions than the EPCM permissions this extra
-permission check is no longer appropriate.
-
-With the permission check removed, ensure that PTEs do
-not blindly inherit the VMA permissions but instead the permissions
-that the VMA and EPCM agree on. PTEs for writable pages (from VMA
-and enclave perspective) are installed with the writable bit set,
-reducing the need for this additional flow to the permission mismatch
-cases handled next.
-
-Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
----
- Documentation/x86/sgx.rst | 10 +++++++++
- arch/x86/kernel/cpu/sgx/encl.c | 38 ++++++++++++++++++----------------
- 2 files changed, 30 insertions(+), 18 deletions(-)
-
-diff --git a/Documentation/x86/sgx.rst b/Documentation/x86/sgx.rst
-index 89ff924b1480..5659932728a5 100644
---- a/Documentation/x86/sgx.rst
-+++ b/Documentation/x86/sgx.rst
-@@ -99,6 +99,16 @@ The relationships between the different permission masks are:
- * PTEs are installed to match the EPCM permissions, but not be more
- relaxed than the VMA permissions.
-
-+On systems supporting SGX2 EPCM permissions may change while the
-+enclave page belongs to a VMA without impacting the VMA permissions.
-+This means that a running VMA may appear to allow access to an enclave
-+page that is not allowed by its EPCM permissions. For example, when an
-+enclave page with RW EPCM permissions is mapped by a RW VMA but is
-+subsequently changed to have read-only EPCM permissions. The kernel
-+continues to maintain correct access to the enclave page through the
-+PTE that will ensure that only access allowed by both the VMA
-+and EPCM permissions are permitted.
-+
- Application interface
- =====================
-
-diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index 6fa3d0a14b93..2f80f9e5e8c6 100644
---- a/arch/x86/kernel/cpu/sgx/encl.c
-+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -132,10 +132,8 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
- }
-
- static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
-- unsigned long addr,
-- unsigned long vm_flags)
-+ unsigned long addr)
- {
-- unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
- struct sgx_epc_page *epc_page;
- struct sgx_encl_page *entry;
-
-@@ -143,14 +141,6 @@ static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
- if (!entry)
- return ERR_PTR(-EFAULT);
-
-- /*
-- * Verify that the faulted page has equal or higher build time
-- * permissions than the VMA permissions (i.e. the subset of {VM_READ,
-- * VM_WRITE, VM_EXECUTE} in vma->vm_flags).
-- */
-- if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
-- return ERR_PTR(-EFAULT);
--
- /* Entry successfully located. */
- if (entry->epc_page) {
- if (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)
-@@ -179,7 +169,9 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
- {
- unsigned long addr = (unsigned long)vmf->address;
- struct vm_area_struct *vma = vmf->vma;
-+ unsigned long page_prot_bits;
- struct sgx_encl_page *entry;
-+ unsigned long vm_prot_bits;
- unsigned long phys_addr;
- struct sgx_encl *encl;
- vm_fault_t ret;
-@@ -196,7 +188,7 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
-
- mutex_lock(&encl->lock);
-
-- entry = sgx_encl_load_page(encl, addr, vma->vm_flags);
-+ entry = sgx_encl_load_page(encl, addr);
- if (IS_ERR(entry)) {
- mutex_unlock(&encl->lock);
-
-@@ -208,7 +200,19 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
-
- phys_addr = sgx_get_epc_phys_addr(entry->epc_page);
-
-- ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
-+ /*
-+ * Insert PTE to match the EPCM page permissions ensured to not
-+ * exceed the VMA permissions.
-+ */
-+ vm_prot_bits = vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
-+ page_prot_bits = entry->vm_max_prot_bits & vm_prot_bits;
-+ /*
-+ * Add VM_SHARED so that PTE is made writable right away if VMA
-+ * and EPCM are writable (no COW in SGX).
-+ */
-+ page_prot_bits |= (vma->vm_flags & VM_SHARED);
-+ ret = vmf_insert_pfn_prot(vma, addr, PFN_DOWN(phys_addr),
-+ vm_get_page_prot(page_prot_bits));
- if (ret != VM_FAULT_NOPAGE) {
- mutex_unlock(&encl->lock);
-
-@@ -336,15 +340,14 @@ static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *pag
- * Load an enclave page to EPC if required, and take encl->lock.
- */
- static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
-- unsigned long addr,
-- unsigned long vm_flags)
-+ unsigned long addr)
- {
- struct sgx_encl_page *entry;
-
- for ( ; ; ) {
- mutex_lock(&encl->lock);
-
-- entry = sgx_encl_load_page(encl, addr, vm_flags);
-+ entry = sgx_encl_load_page(encl, addr);
- if (PTR_ERR(entry) != -EBUSY)
- break;
-
-@@ -380,8 +383,7 @@ static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
- return -EFAULT;
-
- for (i = 0; i < len; i += cnt) {
-- entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK,
-- vma->vm_flags);
-+ entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK);
- if (IS_ERR(entry)) {
- ret = PTR_ERR(entry);
- break;
---
-2.35.1
-
diff --git a/0007-x86-sgx-Add-pfn_mkwrite-handler-for-present-PTEs.patch b/0007-x86-sgx-Add-pfn_mkwrite-handler-for-present-PTEs.patch
deleted file mode 100644
index 522d6c7e3a0f..000000000000
--- a/0007-x86-sgx-Add-pfn_mkwrite-handler-for-present-PTEs.patch
+++ /dev/null
@@ -1,161 +0,0 @@
-From 2cf17dff036b8b00dc43747698ae72f51c751361 Mon Sep 17 00:00:00 2001
-From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:29 -0800
-Subject: [PATCH 07/34] x86/sgx: Add pfn_mkwrite() handler for present PTEs
-
-By default a write page fault on a present PTE inherits the
-permissions of the VMA.
-
-When using SGX2, enclave page permissions maintained in the
-hardware's Enclave Page Cache Map (EPCM) may change after a VMA
-accessing the page is created. A VMA's permissions may thus be
-more relaxed than the EPCM permissions even though the VMA was
-originally created not to have more relaxed permissions. Following
-the default behavior during a page fault on a present PTE while
-the VMA permissions are more relaxed than the EPCM permissions would
-result in the PTE for an enclave page to be writable even
-though the page is not writable according to the EPCM permissions.
-
-The kernel should not allow writing to a page if that page is not
-writable: the PTE should accurately reflect the EPCM permissions
-while not being more relaxed than the VMA permissions.
-
-Do not blindly accept VMA permissions on a page fault due to a
-write attempt to a present PTE. Install a pfn_mkwrite() handler
-that ensures that the VMA permissions agree with the EPCM
-permissions in this regard.
-
-Before and after page fault flow scenarios
-==========================================
-
-Consider the following scenario that will be possible when using SGX2:
-* An enclave page exists with RW EPCM permissions.
-* A RW VMA maps the range spanning the enclave page.
-* The enclave page's EPCM permissions are changed to read-only.
-* There is no PTE for the enclave page.
-
-Considering that the PTE is not present in the scenario,
-user space will observe the following when attempting to write to the
-enclave page from within the enclave:
- 1) Instruction writing to enclave page is run from within the enclave.
- 2) A page fault with second and third bits set (0x6) is encountered
- and handled by the SGX handler sgx_vma_fault() that installs a
- read-only page table entry following previous patch that installs
- a PTE with permissions that VMA and enclave agree on
- (read-only in this case).
- 3) Instruction writing to enclave page is re-attempted.
- 4) A page fault with first three bits set (0x7) is encountered and
- transparently (from SGX driver and user space perspective) handled
- by the kernel with the PTE made writable because the VMA is
- writable.
- 5) Instruction writing to enclave page is re-attempted.
- 6) Since the EPCM permissions prevents writing to the page a new page
- fault is encountered, this time with the SGX flag set in the error
- code (0x8007). No action is taken by the kernel for this page fault
- and execution returns to user space.
- 7) Typically such a fault will be passed on to an application with a
- signal but if the enclave is entered with the vDSO function provided
- by the kernel then user space does not receive a signal but instead
- the vDSO function returns successfully with exception information
- (vector=14, error code=0x8007, and address) within the exception
- fields within the vDSO function's struct sgx_enclave_run.
-
-As can be observed it is not possible for user space to write to an
-enclave page if that page's EPCM permissions do not allow so,
-no matter what the VMA or PTE allows.
-
-Even so, the kernel should not allow writing to a page if that page is
-not writable. The PTE should accurately reflect the EPCM permissions.
-
-With a pfn_mkwrite() handler that ensures that the VMA permissions
-agree with the EPCM permissions user space observes the following
-when attempting to write to the enclave page from within the enclave:
- 1) Instruction writing to enclave page is run from within the enclave.
- 2) A page fault with second and third bits set (0x6) is encountered
- and handled by the SGX handler sgx_vma_fault() that installs a
- read-only page table entry following previous patch that installs
- a PTE with permissions that VMA and enclave agree on
- (read-only in this case).
- 3) Instruction writing to enclave page is re-attempted.
- 4) A page fault with first three bits set (0x7) is encountered and
- passed to the pfn_mkwrite() handler for consideration. The handler
- determines that the page should not be writable and returns SIGBUS.
- 5) Typically such a fault will be passed on to an application with a
- signal but if the enclave is entered with the vDSO function provided
- by the kernel then user space does not receive a signal but instead
- the vDSO function returns successfully with exception information
- (vector=14, error code=0x7, and address) within the exception fields
- within the vDSO function's struct sgx_enclave_run.
-
-The accurate exception information supports the SGX runtime, which is
-virtually always implemented inside a shared library, by providing
-accurate information in support of its management of the SGX enclave.
-
-Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
----
- arch/x86/kernel/cpu/sgx/encl.c | 42 ++++++++++++++++++++++++++++++++++
- 1 file changed, 42 insertions(+)
-
-diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index 2f80f9e5e8c6..acedccf8c4ef 100644
---- a/arch/x86/kernel/cpu/sgx/encl.c
-+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -225,6 +225,47 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
- return VM_FAULT_NOPAGE;
- }
-
-+/*
-+ * A fault occurred while writing to a present enclave PTE. Since PTE is
-+ * present this will not be handled by sgx_vma_fault(). VMA may allow
-+ * writing to the page while enclave (as based on EPCM permissions) does
-+ * not. Do not follow the default of inheriting VMA permissions in this
-+ * regard, ensure enclave also allows writing to the page.
-+ */
-+static vm_fault_t sgx_vma_pfn_mkwrite(struct vm_fault *vmf)
-+{
-+ unsigned long addr = (unsigned long)vmf->address;
-+ struct vm_area_struct *vma = vmf->vma;
-+ struct sgx_encl_page *entry;
-+ struct sgx_encl *encl;
-+ vm_fault_t ret = 0;
-+
-+ encl = vma->vm_private_data;
-+
-+ /*
-+ * It's very unlikely but possible that allocating memory for the
-+ * mm_list entry of a forked process failed in sgx_vma_open(). When
-+ * this happens, vm_private_data is set to NULL.
-+ */
-+ if (unlikely(!encl))
-+ return VM_FAULT_SIGBUS;
-+
-+ mutex_lock(&encl->lock);
-+
-+ entry = xa_load(&encl->page_array, PFN_DOWN(addr));
-+ if (!entry) {
-+ ret = VM_FAULT_SIGBUS;
-+ goto out;
-+ }
-+
-+ if (!(entry->vm_max_prot_bits & VM_WRITE))
-+ ret = VM_FAULT_SIGBUS;
-+
-+out:
-+ mutex_unlock(&encl->lock);
-+ return ret;
-+}
-+
- static void sgx_vma_open(struct vm_area_struct *vma)
- {
- struct sgx_encl *encl = vma->vm_private_data;
-@@ -422,6 +463,7 @@ const struct vm_operations_struct sgx_vm_ops = {
- .mprotect = sgx_vma_mprotect,
- .open = sgx_vma_open,
- .access = sgx_vma_access,
-+ .pfn_mkwrite = sgx_vma_pfn_mkwrite,
- };
-
- /**
---
-2.35.1
-
diff --git a/0010-x86-sgx-Rename-sgx_encl_ewb_cpumask-as-sgx_encl_cpum.patch b/0007-x86-sgx-Rename-sgx_encl_ewb_cpumask-as-sgx_encl_cpum.patch
index f7b34257766f..1c17f6f9257a 100644
--- a/0010-x86-sgx-Rename-sgx_encl_ewb_cpumask-as-sgx_encl_cpum.patch
+++ b/0007-x86-sgx-Rename-sgx_encl_ewb_cpumask-as-sgx_encl_cpum.patch
@@ -1,7 +1,7 @@
-From a755b1d15e60d9b081586bbaa219d9eb11e0f2bd Mon Sep 17 00:00:00 2001
+From 88c9ced2f94ffd6a7a3e5d3a0b6ad47bee0008d3 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:32 -0800
-Subject: [PATCH 10/34] x86/sgx: Rename sgx_encl_ewb_cpumask() as
+Date: Fri, 7 Jan 2022 16:08:11 -0800
+Subject: [PATCH 07/30] x86/sgx: Rename sgx_encl_ewb_cpumask() as
sgx_encl_cpumask()
sgx_encl_ewb_cpumask() is no longer unique to the reclaimer where it
@@ -26,10 +26,10 @@ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index 8cb99fc542a6..a8b23e21fd86 100644
+index c6525eba74e8..8de9bebc4d81 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -637,7 +637,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
+@@ -614,7 +614,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
}
/**
@@ -38,7 +38,7 @@ index 8cb99fc542a6..a8b23e21fd86 100644
* @encl: the enclave
*
* Some SGX functions require that no cached linear-to-physical address
-@@ -662,7 +662,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
+@@ -639,7 +639,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
* The following flow is used to support SGX functions that require that
* no cached linear-to-physical address mappings are present:
* 1) Execute ENCLS[ETRACK] to initiate hardware tracking.
@@ -47,7 +47,7 @@ index 8cb99fc542a6..a8b23e21fd86 100644
* accessing the enclave.
* 3) Send IPI to identified CPUs, kicking them out of the enclave and
* thus flushing all locally cached linear-to-physical address mappings.
-@@ -679,7 +679,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
+@@ -656,7 +656,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
*
* Return: cpumask of CPUs that might be accessing @encl
*/
@@ -57,10 +57,10 @@ index 8cb99fc542a6..a8b23e21fd86 100644
cpumask_t *cpumask = &encl->cpumask;
struct sgx_encl_mm *encl_mm;
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
-index 44431da21757..becb68503baa 100644
+index d2acb4debde5..e59c2cbf71e2 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
-@@ -106,7 +106,7 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
+@@ -105,7 +105,7 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
void sgx_encl_release(struct kref *ref);
int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
@@ -70,7 +70,7 @@ index 44431da21757..becb68503baa 100644
struct sgx_backing *backing);
void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
-index d481e8b0e7bc..60b166bff7b4 100644
+index 2de85f459492..fa33922879bf 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -249,7 +249,7 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
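
The ETRACK/IPI flow described in the kernel-doc above is used later in the series roughly as follows; a minimal sketch assuming __etrack(), sgx_get_epc_virt_addr() and the sgx_ipi_cb() made available by patch 09, not a literal hunk from this commit:

static int sgx_enclave_etrack(struct sgx_encl *encl)
{
	void *epc_va = sgx_get_epc_virt_addr(encl->secs.epc_page);
	int ret;

	/* Start hardware tracking of address translations for this enclave. */
	ret = __etrack(epc_va);
	if (ret) {
		/* A previous ETRACK cycle is still pending: flush and retry. */
		on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1);
		ret = __etrack(epc_va);
	}

	return ret;
}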
diff --git a/0011-x86-sgx-Move-PTE-zap-code-to-new-sgx_zap_enclave_pte.patch b/0008-x86-sgx-Move-PTE-zap-code-to-new-sgx_zap_enclave_pte.patch
index 920d5b6a07de..7785e277fcb9 100644
--- a/0011-x86-sgx-Move-PTE-zap-code-to-new-sgx_zap_enclave_pte.patch
+++ b/0008-x86-sgx-Move-PTE-zap-code-to-new-sgx_zap_enclave_pte.patch
@@ -1,7 +1,7 @@
-From 27ed183bbe2ab9f33c14b33fd1f47d8b2ab733f6 Mon Sep 17 00:00:00 2001
+From c7cbef91a72ec9afba76d518a2bc3102a13cfdb6 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:33 -0800
-Subject: [PATCH 11/34] x86/sgx: Move PTE zap code to new
+Date: Thu, 20 May 2021 14:23:58 -0700
+Subject: [PATCH 08/30] x86/sgx: Move PTE zap code to new
sgx_zap_enclave_ptes()
The SGX reclaimer removes page table entries pointing to pages that are
@@ -31,10 +31,10 @@ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
3 files changed, 47 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index a8b23e21fd86..0fd184fd25d7 100644
+index 8de9bebc4d81..c77a62432862 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -628,7 +628,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
+@@ -605,7 +605,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
spin_lock(&encl->mm_lock);
list_add_rcu(&encl_mm->list, &encl->mm_list);
@@ -43,7 +43,7 @@ index a8b23e21fd86..0fd184fd25d7 100644
smp_wmb();
encl->mm_list_version++;
spin_unlock(&encl->mm_lock);
-@@ -815,6 +815,49 @@ int sgx_encl_test_and_clear_young(struct mm_struct *mm,
+@@ -792,6 +792,49 @@ int sgx_encl_test_and_clear_young(struct mm_struct *mm,
return ret;
}
@@ -94,10 +94,10 @@ index a8b23e21fd86..0fd184fd25d7 100644
* sgx_alloc_va_page() - Allocate a Version Array (VA) page
*
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
-index becb68503baa..82e21088e68b 100644
+index e59c2cbf71e2..1b15d22f6757 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
-@@ -112,7 +112,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+@@ -111,7 +111,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
int sgx_encl_test_and_clear_young(struct mm_struct *mm,
struct sgx_encl_page *page);
@@ -107,7 +107,7 @@ index becb68503baa..82e21088e68b 100644
unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
-index 60b166bff7b4..06492dcffcf1 100644
+index fa33922879bf..ce9e87d5f8ec 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -137,36 +137,9 @@ static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
diff --git a/0008-x86-sgx-x86-sgx-Add-sgx_encl_page-vm_run_prot_bits-f.patch b/0008-x86-sgx-x86-sgx-Add-sgx_encl_page-vm_run_prot_bits-f.patch
deleted file mode 100644
index e05dcbd7cdc2..000000000000
--- a/0008-x86-sgx-x86-sgx-Add-sgx_encl_page-vm_run_prot_bits-f.patch
+++ /dev/null
@@ -1,168 +0,0 @@
-From f2bdca656a164126d3a89893828d334ae008c93a Mon Sep 17 00:00:00 2001
-From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:30 -0800
-Subject: [PATCH 08/34] x86/sgx: x86/sgx: Add sgx_encl_page->vm_run_prot_bits
- for dynamic permission changes
-
-Enclave creators declare their enclave page permissions (EPCM
-permissions) at the time the pages are added to the enclave. These
-page permissions are the vetted permissible accesses of the enclave
-pages and stashed off (in struct sgx_encl_page->vm_max_prot_bits)
-for later comparison with enclave PTEs and VMAs.
-
-Current permission support assume that EPCM permissions remain static
-for the lifetime of the enclave. This is about to change with the
-addition of support for SGX2 where the EPCM permissions of enclave
-pages belonging to an initialized enclave may change during the
-enclave's lifetime.
-
-Support for changing of EPCM permissions should continue to respect
-the vetted maximum protection bits maintained in
-sgx_encl_page->vm_max_prot_bits. Towards this end, add
-sgx_encl_page->vm_run_prot_bits in preparation for support of
-enclave page permission changes. sgx_encl_page->vm_run_prot_bits
-reflect the active EPCM permissions of an enclave page and are not to
-exceed sgx_encl_page->vm_max_prot_bits.
-
-Two permission fields are used: sgx_encl_page->vm_run_prot_bits
-reflects the current EPCM permissions and is used to manage the page
-table entries while sgx_encl_page->vm_max_prot_bits contains the vetted
-maximum protection bits and is used to guide which EPCM permissions
-are allowed in the upcoming SGX2 permission changing support (it guides
-what values sgx_encl_page->vm_run_prot_bits may have).
-
-Consider this example how sgx_encl_page->vm_max_prot_bits and
-sgx_encl_page->vm_run_prot_bits are used:
-
-(1) Add an enclave page with secinfo of RW to an uninitialized enclave:
- sgx_encl_page->vm_max_prot_bits = RW
- sgx_encl_page->vm_run_prot_bits = RW
-
- At this point RW VMAs would be allowed to access this page and PTEs
- would allow write access as guided by
- sgx_encl_page->vm_run_prot_bits.
-
-(2) User space invokes SGX2 to change the EPCM permissions to read-only.
- This is allowed because sgx_encl_page->vm_max_prot_bits = RW:
- sgx_encl_page->vm_max_prot_bits = RW
- sgx_encl_page->vm_run_prot_bits = R
-
- At this point only new read-only VMAs would be allowed to access
- this page and PTEs would not allow write access as guided
- by sgx_encl_page->vm_run_prot_bits.
-
-(3) User space invokes SGX2 to change the EPCM permissions to RX.
- This will not be supported by the kernel because
- sgx_encl_page->vm_max_prot_bits = RW:
- sgx_encl_page->vm_max_prot_bits = RW
- sgx_encl_page->vm_run_prot_bits = R
-
-(3) User space invokes SGX2 to change the EPCM permissions to RW.
- This will be allowed because sgx_encl_page->vm_max_prot_bits = RW:
- sgx_encl_page->vm_max_prot_bits = RW
- sgx_encl_page->vm_run_prot_bits = RW
-
- At this point RW VMAs would again be allowed to access this page
- and PTEs would allow write access as guided by
- sgx_encl_page->vm_run_prot_bits.
-
-struct sgx_encl_page hosting this information is maintained for each
-enclave page so the space consumed by the struct is important.
-The existing sgx_encl_page->vm_max_prot_bits is already unsigned long
-while only using three bits. Transition to a bitfield for the two
-members containing protection bits.
-
-Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
----
- Documentation/x86/sgx.rst | 10 ++++++++++
- arch/x86/kernel/cpu/sgx/encl.c | 6 +++---
- arch/x86/kernel/cpu/sgx/encl.h | 3 ++-
- arch/x86/kernel/cpu/sgx/ioctl.c | 6 ++++++
- 4 files changed, 21 insertions(+), 4 deletions(-)
-
-diff --git a/Documentation/x86/sgx.rst b/Documentation/x86/sgx.rst
-index 5659932728a5..9df620b59f83 100644
---- a/Documentation/x86/sgx.rst
-+++ b/Documentation/x86/sgx.rst
-@@ -99,6 +99,16 @@ The relationships between the different permission masks are:
- * PTEs are installed to match the EPCM permissions, but not be more
- relaxed than the VMA permissions.
-
-+During runtime the EPCM permissions of enclave pages belonging to an
-+initialized enclave can change on systems supporting SGX2. In support
-+of these runtime changes the kernel maintains (for each enclave page)
-+the most permissive EPCM permission mask allowed by policy as
-+the ``vm_max_prot_bits`` of that page. EPCM permissions are not allowed
-+to be relaxed beyond ``vm_max_prot_bits``. The kernel also maintains
-+the currently active EPCM permissions of an enclave page as its
-+``vm_run_prot_bits`` to ensure PTEs and new VMAs respect the active
-+EPCM permission values.
-+
- On systems supporting SGX2 EPCM permissions may change while the
- enclave page belongs to a VMA without impacting the VMA permissions.
- This means that a running VMA may appear to allow access to an enclave
-diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index acedccf8c4ef..85429db8c8b5 100644
---- a/arch/x86/kernel/cpu/sgx/encl.c
-+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -205,7 +205,7 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
- * exceed the VMA permissions.
- */
- vm_prot_bits = vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
-- page_prot_bits = entry->vm_max_prot_bits & vm_prot_bits;
-+ page_prot_bits = entry->vm_run_prot_bits & vm_prot_bits;
- /*
- * Add VM_SHARED so that PTE is made writable right away if VMA
- * and EPCM are writable (no COW in SGX).
-@@ -258,7 +258,7 @@ static vm_fault_t sgx_vma_pfn_mkwrite(struct vm_fault *vmf)
- goto out;
- }
-
-- if (!(entry->vm_max_prot_bits & VM_WRITE))
-+ if (!(entry->vm_run_prot_bits & VM_WRITE))
- ret = VM_FAULT_SIGBUS;
-
- out:
-@@ -321,7 +321,7 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
- mutex_lock(&encl->lock);
- xas_lock(&xas);
- xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
-- if (~page->vm_max_prot_bits & vm_prot_bits) {
-+ if (~page->vm_run_prot_bits & vm_prot_bits) {
- ret = -EACCES;
- break;
- }
-diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
-index fec43ca65065..dc262d843411 100644
---- a/arch/x86/kernel/cpu/sgx/encl.h
-+++ b/arch/x86/kernel/cpu/sgx/encl.h
-@@ -27,7 +27,8 @@
-
- struct sgx_encl_page {
- unsigned long desc;
-- unsigned long vm_max_prot_bits;
-+ unsigned long vm_max_prot_bits:8;
-+ unsigned long vm_run_prot_bits:8;
- struct sgx_epc_page *epc_page;
- struct sgx_encl *encl;
- struct sgx_va_page *va_page;
-diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
-index 83df20e3e633..7e0819a89532 100644
---- a/arch/x86/kernel/cpu/sgx/ioctl.c
-+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
-@@ -197,6 +197,12 @@ static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
- /* Calculate maximum of the VM flags for the page. */
- encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
-
-+ /*
-+ * At time of allocation, the runtime protection bits are the same
-+ * as the maximum protection bits.
-+ */
-+ encl_page->vm_run_prot_bits = encl_page->vm_max_prot_bits;
-+
- return encl_page;
- }
-
---
-2.35.1
-
diff --git a/0012-x86-sgx-Make-sgx_ipi_cb-available-internally.patch b/0009-x86-sgx-Make-sgx_ipi_cb-available-internally.patch
index 85c89d635708..b8c7e966716e 100644
--- a/0012-x86-sgx-Make-sgx_ipi_cb-available-internally.patch
+++ b/0009-x86-sgx-Make-sgx_ipi_cb-available-internally.patch
@@ -1,7 +1,7 @@
-From ef7d7909abfc46b2a8b3aaa0ab5375033688123e Mon Sep 17 00:00:00 2001
+From ffff3007d13c58adc39a670951586a40cb999b75 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:34 -0800
-Subject: [PATCH 12/34] x86/sgx: Make sgx_ipi_cb() available internally
+Date: Thu, 3 Jun 2021 16:56:53 -0700
+Subject: [PATCH 09/30] x86/sgx: Make sgx_ipi_cb() available internally
The ETRACK function followed by an IPI to all CPUs within an enclave
is a common pattern with more frequent use in support of SGX2.
@@ -16,7 +16,7 @@ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
-index 06492dcffcf1..1a3014aec490 100644
+index ce9e87d5f8ec..6e2cb7564080 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -172,7 +172,7 @@ static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
diff --git a/0013-x86-sgx-Create-utility-to-validate-user-provided-off.patch b/0010-x86-sgx-Create-utility-to-validate-user-provided-off.patch
index dad6bc6267d8..a9105252064e 100644
--- a/0013-x86-sgx-Create-utility-to-validate-user-provided-off.patch
+++ b/0010-x86-sgx-Create-utility-to-validate-user-provided-off.patch
@@ -1,7 +1,7 @@
-From 3f7c2ce6cf953014eb405b35f3f31e3bd3a7c757 Mon Sep 17 00:00:00 2001
+From ecacf0430a389f012555938381664e8f8d86af54 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:35 -0800
-Subject: [PATCH 13/34] x86/sgx: Create utility to validate user provided
+Date: Fri, 21 Jan 2022 13:35:21 -0800
+Subject: [PATCH 10/30] x86/sgx: Create utility to validate user provided
offset and length
User provided offset and length is validated when parsing the parameters
@@ -15,10 +15,10 @@ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
-index 7e0819a89532..6e7cc441156b 100644
+index 83df20e3e633..f487549bccba 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
-@@ -378,6 +378,26 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
+@@ -372,6 +372,26 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
return ret;
}
@@ -45,7 +45,7 @@ index 7e0819a89532..6e7cc441156b 100644
/**
* sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES
* @encl: an enclave pointer
-@@ -431,14 +451,10 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
+@@ -425,14 +445,10 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
if (copy_from_user(&add_arg, arg, sizeof(add_arg)))
return -EFAULT;
diff --git a/0014-x86-sgx-Keep-record-of-SGX-page-type.patch b/0011-x86-sgx-Keep-record-of-SGX-page-type.patch
index b3d8af97fe35..6733c5307d45 100644
--- a/0014-x86-sgx-Keep-record-of-SGX-page-type.patch
+++ b/0011-x86-sgx-Keep-record-of-SGX-page-type.patch
@@ -1,7 +1,7 @@
-From b5fb28ae93bf2d97c3a3aae2d37f94a6789d51ce Mon Sep 17 00:00:00 2001
+From d759347c5feada6386aa3883bb9e0c18d377bd77 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:36 -0800
-Subject: [PATCH 14/34] x86/sgx: Keep record of SGX page type
+Date: Wed, 9 Jun 2021 15:59:02 -0700
+Subject: [PATCH 11/30] x86/sgx: Keep record of SGX page type
SGX2 functions are not allowed on all page types. For example,
ENCLS[EMODPR] is only allowed on regular SGX enclave pages and
@@ -20,17 +20,20 @@ a different way. The SGX2 instructions needing the page type do not
operate on VA pages and this is thus not a scenario needing to
be covered at this time.
-With the protection bits consuming 16 bits of the unsigned long
-there is room available in the bitfield to include the page type
-information without increasing the space consumed by the struct.
+struct sgx_encl_page hosting this information is maintained for each
+enclave page so the space consumed by the struct is important.
+The existing sgx_encl_page->vm_max_prot_bits is already unsigned long
+while only using three bits. Transition to a bitfield for the two
+members to support the additional information without increasing
+the space consumed by the struct.
Acked-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
---
arch/x86/include/asm/sgx.h | 3 +++
- arch/x86/kernel/cpu/sgx/encl.h | 1 +
+ arch/x86/kernel/cpu/sgx/encl.h | 3 ++-
arch/x86/kernel/cpu/sgx/ioctl.c | 2 ++
- 3 files changed, 6 insertions(+)
+ 3 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/sgx.h b/arch/x86/include/asm/sgx.h
index d67810b50a81..eae20fa52b93 100644
@@ -47,19 +50,21 @@ index d67810b50a81..eae20fa52b93 100644
enum sgx_page_type {
SGX_PAGE_TYPE_SECS,
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
-index 82e21088e68b..cb9f16d457ac 100644
+index 1b15d22f6757..07abfc70c8e3 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
-@@ -29,6 +29,7 @@ struct sgx_encl_page {
+@@ -27,7 +27,8 @@
+
+ struct sgx_encl_page {
unsigned long desc;
- unsigned long vm_max_prot_bits:8;
- unsigned long vm_run_prot_bits:8;
+- unsigned long vm_max_prot_bits;
++ unsigned long vm_max_prot_bits:8;
+ enum sgx_page_type type:16;
struct sgx_epc_page *epc_page;
struct sgx_encl *encl;
struct sgx_va_page *va_page;
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
-index 6e7cc441156b..b8336d5d9029 100644
+index f487549bccba..0c211af8e948 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -107,6 +107,7 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
@@ -70,7 +75,7 @@ index 6e7cc441156b..b8336d5d9029 100644
encl->base = secs->base;
encl->size = secs->size;
encl->attributes = secs->attributes;
-@@ -350,6 +351,7 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
+@@ -344,6 +345,7 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
*/
encl_page->encl = encl;
encl_page->epc_page = epc_page;
diff --git a/0012-x86-sgx-Export-sgx_encl_-grow-shrink.patch b/0012-x86-sgx-Export-sgx_encl_-grow-shrink.patch
new file mode 100644
index 000000000000..900f5e5c0015
--- /dev/null
+++ b/0012-x86-sgx-Export-sgx_encl_-grow-shrink.patch
@@ -0,0 +1,52 @@
+From 1408fa054374699618f56528ce7eda1a9640b0aa Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Thu, 10 Mar 2022 16:08:12 -0800
+Subject: [PATCH 12/30] x86/sgx: Export sgx_encl_{grow,shrink}()
+
+In order to use sgx_encl_{grow,shrink}() in the page augmentation code
+located in encl.c, export these functions.
+
+Suggested-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+---
+ arch/x86/kernel/cpu/sgx/encl.h | 2 ++
+ arch/x86/kernel/cpu/sgx/ioctl.c | 4 ++--
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
+index 07abfc70c8e3..9d673d9531f0 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.h
++++ b/arch/x86/kernel/cpu/sgx/encl.h
+@@ -120,5 +120,7 @@ bool sgx_va_page_full(struct sgx_va_page *va_page);
+ void sgx_encl_free_epc_page(struct sgx_epc_page *page);
+ struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
+ unsigned long addr);
++struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl);
++void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page);
+
+ #endif /* _X86_ENCL_H */
+diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
+index 0c211af8e948..746acddbb774 100644
+--- a/arch/x86/kernel/cpu/sgx/ioctl.c
++++ b/arch/x86/kernel/cpu/sgx/ioctl.c
+@@ -17,7 +17,7 @@
+ #include "encl.h"
+ #include "encls.h"
+
+-static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
++struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
+ {
+ struct sgx_va_page *va_page = NULL;
+ void *err;
+@@ -43,7 +43,7 @@ static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
+ return va_page;
+ }
+
+-static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
++void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
+ {
+ encl->page_cnt--;
+
+--
+2.35.1
+
diff --git a/0013-x86-sgx-Export-sgx_encl_page_alloc.patch b/0013-x86-sgx-Export-sgx_encl_page_alloc.patch
new file mode 100644
index 000000000000..c0c97adec60d
--- /dev/null
+++ b/0013-x86-sgx-Export-sgx_encl_page_alloc.patch
@@ -0,0 +1,120 @@
+From b53096d9fcb368a8d2568c04817c4d2ebd4b24d4 Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko@kernel.org>
+Date: Tue, 8 Mar 2022 13:28:32 +0200
+Subject: [PATCH 13/30] x86/sgx: Export sgx_encl_page_alloc()
+
+Move sgx_encl_page_alloc() to encl.c and export it so that it can be
+used in the implementation for support of adding pages to initialized
+enclaves, which requires to allocate new enclave pages.
+
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+---
+ arch/x86/kernel/cpu/sgx/encl.c | 32 ++++++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/sgx/encl.h | 3 +++
+ arch/x86/kernel/cpu/sgx/ioctl.c | 32 --------------------------------
+ 3 files changed, 35 insertions(+), 32 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
+index c77a62432862..546423753e4c 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.c
++++ b/arch/x86/kernel/cpu/sgx/encl.c
+@@ -792,6 +792,38 @@ int sgx_encl_test_and_clear_young(struct mm_struct *mm,
+ return ret;
+ }
+
++struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
++ unsigned long offset,
++ u64 secinfo_flags)
++{
++ struct sgx_encl_page *encl_page;
++ unsigned long prot;
++
++ encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
++ if (!encl_page)
++ return ERR_PTR(-ENOMEM);
++
++ encl_page->desc = encl->base + offset;
++ encl_page->encl = encl;
++
++ prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ) |
++ _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
++ _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);
++
++ /*
++	 * TCS pages must always have RW set for CPU access while the SECINFO
++	 * permissions are *always* zero - the CPU ignores the user-provided
++ * values and silently overwrites them with zero permissions.
++ */
++ if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
++ prot |= PROT_READ | PROT_WRITE;
++
++ /* Calculate maximum of the VM flags for the page. */
++ encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
++
++ return encl_page;
++}
++
+ /**
+ * sgx_zap_enclave_ptes() - remove PTEs mapping the address from enclave
+ * @encl: the enclave
+diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
+index 9d673d9531f0..253ebdd1c5be 100644
+--- a/arch/x86/kernel/cpu/sgx/encl.h
++++ b/arch/x86/kernel/cpu/sgx/encl.h
+@@ -112,6 +112,9 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
+ void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
+ int sgx_encl_test_and_clear_young(struct mm_struct *mm,
+ struct sgx_encl_page *page);
++struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
++ unsigned long offset,
++ u64 secinfo_flags);
+ void sgx_zap_enclave_ptes(struct sgx_encl *encl, unsigned long addr);
+ struct sgx_epc_page *sgx_alloc_va_page(void);
+ unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
+diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
+index 746acddbb774..0460fd224a05 100644
+--- a/arch/x86/kernel/cpu/sgx/ioctl.c
++++ b/arch/x86/kernel/cpu/sgx/ioctl.c
+@@ -169,38 +169,6 @@ static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
+ return ret;
+ }
+
+-static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
+- unsigned long offset,
+- u64 secinfo_flags)
+-{
+- struct sgx_encl_page *encl_page;
+- unsigned long prot;
+-
+- encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
+- if (!encl_page)
+- return ERR_PTR(-ENOMEM);
+-
+- encl_page->desc = encl->base + offset;
+- encl_page->encl = encl;
+-
+- prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ) |
+- _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
+- _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);
+-
+- /*
+- * TCS pages must always RW set for CPU access while the SECINFO
+- * permissions are *always* zero - the CPU ignores the user provided
+- * values and silently overwrites them with zero permissions.
+- */
+- if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
+- prot |= PROT_READ | PROT_WRITE;
+-
+- /* Calculate maximum of the VM flags for the page. */
+- encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
+-
+- return encl_page;
+-}
+-
+ static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
+ {
+ u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
+--
+2.35.1
+
diff --git a/0016-x86-sgx-Support-restricting-of-enclave-page-permissi.patch b/0014-x86-sgx-Support-restricting-of-enclave-page-permissi.patch
index b11fd2de8e70..fcd03a7cb57b 100644
--- a/0016-x86-sgx-Support-restricting-of-enclave-page-permissi.patch
+++ b/0014-x86-sgx-Support-restricting-of-enclave-page-permissi.patch
@@ -1,7 +1,7 @@
-From 20d33afacabde997e8a99e4bd08a424d1a40669b Mon Sep 17 00:00:00 2001
+From cd16553f3cb42e94f3e5ed83c28375e85c4462f7 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:38 -0800
-Subject: [PATCH 16/34] x86/sgx: Support restricting of enclave page
+Date: Mon, 7 Jun 2021 09:13:44 -0700
+Subject: [PATCH 14/30] x86/sgx: Support restricting of enclave page
permissions
In the initial (SGX1) version of SGX, pages in an enclave need to be
@@ -16,13 +16,11 @@ pages within an initialized enclave.
Introduce ioctl() SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS to support
restricting EPCM permissions. With this ioctl() the user specifies
-a page range and the permissions to be applied to all pages in
-the provided range. After checking the new permissions (more detail
-below) the page table entries are reset and any new page
-table entries will contain the new, restricted, permissions.
-ENCLS[EMODPR] is run to restrict the EPCM permissions followed by
-the ENCLS[ETRACK] flow that will ensure no cached
-linear-to-physical address mappings to the changed pages remain.
+a page range and the EPCM permissions to be applied to all pages in
+the provided range. ENCLS[EMODPR] is run to restrict the EPCM
+permissions followed by the ENCLS[ETRACK] flow that will ensure
+no cached linear-to-physical address mappings to the changed
+pages remain.
It is possible for the permission change request to fail on any
page within the provided range, either with an error encountered
@@ -32,53 +30,45 @@ error code based on failures encountered by the kernel as well
as two result output parameters: one for the number of pages
that were successfully changed and one for the SGX return code.
-Checking user provided new permissions
-======================================
+The page table entry permissions are not impacted by the EPCM
+permission changes. VMAs and PTEs will continue to allow the
+maximum vetted permissions determined at the time the pages
+are added to the enclave. The SGX error code in a page fault
+will indicate if it was an EPCM permission check that prevented
+an access attempt.
-Enclave page permission changes need to be approached with care and
-for this reason permission changes are only allowed if the new
-permissions are the same or more restrictive that the vetted
-permissions. No additional checking is done to ensure that the
-permissions are actually being restricted. This is because the
-enclave may have relaxed the EPCM permissions from within
-the enclave without letting the kernel know. An attempt to relax
-permissions using this call will be ignored by the hardware.
-
-For example, together with the support for relaxing of EPCM permissions,
-enclave pages added with the vetted permissions in brackets below
-are allowed to have permissions as follows:
-* (RWX) => RW => R => RX => RWX
-* (RW) => R => RW
-* (RX) => R => RX
+No checking is done to ensure that the permissions are actually
+being restricted. This is because the enclave may have relaxed
+the EPCM permissions from within the enclave without letting the
+kernel know. An attempt to relax permissions using this call will
+be ignored by the hardware.
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
---
arch/x86/include/uapi/asm/sgx.h | 21 +++
- arch/x86/kernel/cpu/sgx/encl.c | 4 +-
- arch/x86/kernel/cpu/sgx/encl.h | 3 +
- arch/x86/kernel/cpu/sgx/ioctl.c | 229 ++++++++++++++++++++++++++++++++
- 4 files changed, 255 insertions(+), 2 deletions(-)
+ arch/x86/kernel/cpu/sgx/ioctl.c | 242 ++++++++++++++++++++++++++++++++
+ 2 files changed, 263 insertions(+)
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
-index 5c678b27bb72..b0ffb80bc67f 100644
+index f4b81587e90b..a0a24e94fb27 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
-@@ -31,6 +31,8 @@ enum sgx_page_flags {
+@@ -29,6 +29,8 @@ enum sgx_page_flags {
+ _IOW(SGX_MAGIC, 0x03, struct sgx_enclave_provision)
+ #define SGX_IOC_VEPC_REMOVE_ALL \
_IO(SGX_MAGIC, 0x04)
- #define SGX_IOC_ENCLAVE_RELAX_PERMISSIONS \
- _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_relax_perm)
+#define SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS \
-+ _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_restrict_perm)
++ _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_restrict_permissions)
/**
* struct sgx_enclave_create - parameter structure for the
-@@ -95,6 +97,25 @@ struct sgx_enclave_relax_perm {
- __u64 count;
+@@ -76,6 +78,25 @@ struct sgx_enclave_provision {
+ __u64 fd;
};
+/**
-+ * struct sgx_enclave_restrict_perm - parameters for ioctl
-+ * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS
++ * struct sgx_enclave_restrict_permissions - parameters for ioctl
++ * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS
+ * @offset: starting page offset (page aligned relative to enclave base
+ * address defined in SECS)
+ * @length: length of memory (multiple of the page size)
@@ -87,7 +77,7 @@ index 5c678b27bb72..b0ffb80bc67f 100644
+ * @result: (output) SGX result code of ENCLS[EMODPR] function
+ * @count: (output) bytes successfully changed (multiple of page size)
+ */
-+struct sgx_enclave_restrict_perm {
++struct sgx_enclave_restrict_permissions {
+ __u64 offset;
+ __u64 length;
+ __u64 secinfo;
@@ -98,42 +88,66 @@ index 5c678b27bb72..b0ffb80bc67f 100644
struct sgx_enclave_run;
/**
-diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index 0fd184fd25d7..cbd852fb760b 100644
---- a/arch/x86/kernel/cpu/sgx/encl.c
-+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -131,8 +131,8 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
- return epc_page;
- }
-
--static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
-- unsigned long addr)
-+struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
-+ unsigned long addr)
- {
- struct sgx_epc_page *epc_page;
- struct sgx_encl_page *entry;
-diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
-index cb9f16d457ac..848a28d28d3d 100644
---- a/arch/x86/kernel/cpu/sgx/encl.h
-+++ b/arch/x86/kernel/cpu/sgx/encl.h
-@@ -120,4 +120,7 @@ void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
- bool sgx_va_page_full(struct sgx_va_page *va_page);
- void sgx_encl_free_epc_page(struct sgx_epc_page *page);
-
-+struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
-+ unsigned long addr);
-+
- #endif /* _X86_ENCL_H */
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
-index 9cc6af404bf6..23bdf558b231 100644
+index 0460fd224a05..4d88bfd163e7 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
-@@ -894,6 +894,232 @@ static long sgx_ioc_enclave_relax_perm(struct sgx_encl *encl, void __user *arg)
- return ret;
+@@ -660,6 +660,244 @@ static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg)
+ return sgx_set_attribute(&encl->attributes_mask, params.fd);
}
+/*
++ * Ensure enclave is ready for SGX2 functions. Readiness is checked
++ * by ensuring the hardware supports SGX2 and the enclave is initialized
++ * and thus able to handle requests to modify pages within it.
++ */
++static int sgx_ioc_sgx2_ready(struct sgx_encl *encl)
++{
++ if (!(cpu_feature_enabled(X86_FEATURE_SGX2)))
++ return -ENODEV;
++
++ if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
++ return -EINVAL;
++
++ return 0;
++}
++
++/*
++ * Return valid permission fields from a secinfo structure provided by
++ * user space. The secinfo structure is required to only have bits in
++ * the permission fields set.
++ */
++static int sgx_perm_from_user_secinfo(void __user *_secinfo, u64 *secinfo_perm)
++{
++ struct sgx_secinfo secinfo;
++ u64 perm;
++
++ if (copy_from_user(&secinfo, (void __user *)_secinfo,
++ sizeof(secinfo)))
++ return -EFAULT;
++
++ if (secinfo.flags & ~SGX_SECINFO_PERMISSION_MASK)
++ return -EINVAL;
++
++ if (memchr_inv(secinfo.reserved, 0, sizeof(secinfo.reserved)))
++ return -EINVAL;
++
++ perm = secinfo.flags & SGX_SECINFO_PERMISSION_MASK;
++
++ /*
++ * Read access is required for the enclave to be able to use the page.
++ * SGX instructions like ENCLU[EMODPE] and ENCLU[EACCEPT] require
++ * read access.
++ */
++ if (!(perm & SGX_SECINFO_R))
++ return -EINVAL;
++
++ *secinfo_perm = perm;
++
++ return 0;
++}
++
++/*
+ * Some SGX functions require that no cached linear-to-physical address
+ * mappings are present before they can succeed. Collaborate with
+ * hardware via ENCLS[ETRACK] to ensure that all cached
@@ -172,7 +186,7 @@ index 9cc6af404bf6..23bdf558b231 100644
+}
+
+/**
-+ * sgx_enclave_restrict_perm() - Restrict EPCM permissions and align OS view
++ * sgx_enclave_restrict_permissions() - Restrict EPCM permissions
+ * @encl: Enclave to which the pages belong.
+ * @modp: Checked parameters from user on which pages need modifying.
+ * @secinfo_perm: New (validated) permission bits.
@@ -181,11 +195,11 @@ index 9cc6af404bf6..23bdf558b231 100644
+ * - 0: Success.
+ * - -errno: Otherwise.
+ */
-+static long sgx_enclave_restrict_perm(struct sgx_encl *encl,
-+ struct sgx_enclave_restrict_perm *modp,
-+ u64 secinfo_perm)
++static long
++sgx_enclave_restrict_permissions(struct sgx_encl *encl,
++ struct sgx_enclave_restrict_permissions *modp,
++ u64 secinfo_perm)
+{
-+ unsigned long vm_prot, run_prot_restore;
+ struct sgx_encl_page *entry;
+ struct sgx_secinfo secinfo;
+ unsigned long addr;
@@ -196,8 +210,6 @@ index 9cc6af404bf6..23bdf558b231 100644
+ memset(&secinfo, 0, sizeof(secinfo));
+ secinfo.flags = secinfo_perm;
+
-+ vm_prot = vm_prot_from_secinfo(secinfo_perm);
-+
+ for (c = 0 ; c < modp->length; c += PAGE_SIZE) {
+ addr = encl->base + modp->offset + c;
+
@@ -220,41 +232,12 @@ index 9cc6af404bf6..23bdf558b231 100644
+ }
+
+ /*
-+ * Do not verify if current runtime protection bits are what
-+ * is being requested. The enclave may have relaxed EPCM
-+ * permissions calls without letting the kernel know and
-+ * thus permission restriction may still be needed even if
-+ * from the kernel's perspective the permissions are unchanged.
-+ */
-+
-+ /* New permissions should never exceed vetted permissions. */
-+ if ((entry->vm_max_prot_bits & vm_prot) != vm_prot) {
-+ ret = -EPERM;
-+ goto out_unlock;
-+ }
-+
-+ /* Make sure page stays around while releasing mutex. */
-+ if (sgx_unmark_page_reclaimable(entry->epc_page)) {
-+ ret = -EAGAIN;
-+ goto out_unlock;
-+ }
-+
-+ /*
-+ * Change runtime protection before zapping PTEs to ensure
-+ * any new #PF uses new permissions. EPCM permissions (if
-+ * needed) not changed yet.
-+ */
-+ run_prot_restore = entry->vm_run_prot_bits;
-+ entry->vm_run_prot_bits = vm_prot;
-+
-+ mutex_unlock(&encl->lock);
-+ /*
-+ * Do not keep encl->lock because of dependency on
-+ * mmap_lock acquired in sgx_zap_enclave_ptes().
++	 * Do not verify the permission bits requested. The kernel
++	 * has no control over how EPCM permissions can be relaxed
++	 * from within the enclave. ENCLS[EMODPR] can only
++	 * remove existing EPCM permissions; attempting to set
++	 * new permissions will be ignored by the hardware.
+ */
-+ sgx_zap_enclave_ptes(encl, addr);
-+
-+ mutex_lock(&encl->lock);
+
+ /* Change EPCM permissions. */
+ epc_virt = sgx_get_epc_virt_addr(entry->epc_page);
@@ -270,31 +253,26 @@ index 9cc6af404bf6..23bdf558b231 100644
+ pr_err_once("EMODPR encountered exception %d\n",
+ ENCLS_TRAPNR(ret));
+ ret = -EFAULT;
-+ goto out_prot_restore;
++ goto out_unlock;
+ }
+ if (encls_failed(ret)) {
+ modp->result = ret;
+ ret = -EFAULT;
-+ goto out_prot_restore;
++ goto out_unlock;
+ }
+
+ ret = sgx_enclave_etrack(encl);
+ if (ret) {
+ ret = -EFAULT;
-+ goto out_reclaim;
++ goto out_unlock;
+ }
+
-+ sgx_mark_page_reclaimable(entry->epc_page);
+ mutex_unlock(&encl->lock);
+ }
+
+ ret = 0;
+ goto out;
+
-+out_prot_restore:
-+ entry->vm_run_prot_bits = run_prot_restore;
-+out_reclaim:
-+ sgx_mark_page_reclaimable(entry->epc_page);
+out_unlock:
+ mutex_unlock(&encl->lock);
+out:
@@ -304,10 +282,10 @@ index 9cc6af404bf6..23bdf558b231 100644
+}
+
+/**
-+ * sgx_ioc_enclave_restrict_perm() - handler for
-+ * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS
++ * sgx_ioc_enclave_restrict_permissions() - handler for
++ * %SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS
+ * @encl: an enclave pointer
-+ * @arg: userspace pointer to a &struct sgx_enclave_restrict_perm
++ * @arg: userspace pointer to a &struct sgx_enclave_restrict_permissions
+ * instance
+ *
+ * SGX2 distinguishes between relaxing and restricting the enclave page
@@ -319,17 +297,14 @@ index 9cc6af404bf6..23bdf558b231 100644
+ * and ENCLS[ETRACK]. An attempt to relax EPCM permissions with this call
+ * will be ignored by the hardware.
+ *
-+ * Enclave page permissions are not allowed to exceed the maximum vetted
-+ * permissions maintained in &struct sgx_encl_page->vm_max_prot_bits.
-+ *
+ * Return:
+ * - 0: Success
+ * - -errno: Otherwise
+ */
-+static long sgx_ioc_enclave_restrict_perm(struct sgx_encl *encl,
-+ void __user *arg)
++static long sgx_ioc_enclave_restrict_permissions(struct sgx_encl *encl,
++ void __user *arg)
+{
-+ struct sgx_enclave_restrict_perm params;
++ struct sgx_enclave_restrict_permissions params;
+ u64 secinfo_perm;
+ long ret;
+
@@ -351,7 +326,7 @@ index 9cc6af404bf6..23bdf558b231 100644
+ if (params.result || params.count)
+ return -EINVAL;
+
-+ ret = sgx_enclave_restrict_perm(encl, &params, secinfo_perm);
++ ret = sgx_enclave_restrict_permissions(encl, &params, secinfo_perm);
+
+ if (copy_to_user(arg, &params, sizeof(params)))
+ return -EFAULT;
@@ -362,12 +337,13 @@ index 9cc6af404bf6..23bdf558b231 100644
long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct sgx_encl *encl = filep->private_data;
-@@ -918,6 +1144,9 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
- case SGX_IOC_ENCLAVE_RELAX_PERMISSIONS:
- ret = sgx_ioc_enclave_relax_perm(encl, (void __user *)arg);
+@@ -681,6 +919,10 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+ case SGX_IOC_ENCLAVE_PROVISION:
+ ret = sgx_ioc_enclave_provision(encl, (void __user *)arg);
break;
+ case SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS:
-+ ret = sgx_ioc_enclave_restrict_perm(encl, (void __user *)arg);
++ ret = sgx_ioc_enclave_restrict_permissions(encl,
++ (void __user *)arg);
+ break;
default:
ret = -ENOIOCTLCMD;
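
A minimal userspace sketch may help make the new ioctl() concrete. It is not part of the patch set: it assumes the UAPI header from this series is installed as <asm/sgx.h> and it mirrors struct sgx_secinfo and SGX_SECINFO_R from the kernel-internal arch/x86/include/asm/sgx.h, since those are not exported to userspace. After a successful call the enclave is still expected to acknowledge the restriction from within via ENCLU[EACCEPT].

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <asm/sgx.h>				/* UAPI from this patch series */

/* Mirrors the kernel-internal definition (assumption, not UAPI). */
struct sgx_secinfo {
	uint64_t flags;
	uint8_t reserved[56];
} __attribute__((aligned(64)));

#define SGX_SECINFO_R	0x01ULL

/* Restrict a page range of an initialized enclave to read-only EPCM permissions. */
static int sgx_restrict_to_ro(int encl_fd, uint64_t offset, uint64_t length)
{
	struct sgx_enclave_restrict_permissions params;
	struct sgx_secinfo secinfo;

	memset(&secinfo, 0, sizeof(secinfo));
	secinfo.flags = SGX_SECINFO_R;		/* new, restricted EPCM permissions */

	memset(&params, 0, sizeof(params));	/* result and count are outputs */
	params.offset = offset;			/* page aligned, relative to enclave base */
	params.length = length;			/* multiple of the page size */
	params.secinfo = (uint64_t)(uintptr_t)&secinfo;

	return ioctl(encl_fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &params);
}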
diff --git a/0019-x86-sgx-Support-adding-of-pages-to-an-initialized-en.patch b/0015-x86-sgx-Support-adding-of-pages-to-an-initialized-en.patch
index 800e691ed388..b7ab3e4d0fcb 100644
--- a/0019-x86-sgx-Support-adding-of-pages-to-an-initialized-en.patch
+++ b/0015-x86-sgx-Support-adding-of-pages-to-an-initialized-en.patch
@@ -1,7 +1,7 @@
-From 6c2777344708ad4194d1f9088bd7dbd2c740fd16 Mon Sep 17 00:00:00 2001
+From c2c09a7fbb72fd5eb423f8e84ade9d1511503612 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:41 -0800
-Subject: [PATCH 19/34] x86/sgx: Support adding of pages to an initialized
+Date: Tue, 13 Jul 2021 08:58:53 -0700
+Subject: [PATCH 15/30] x86/sgx: Support adding of pages to an initialized
enclave
With SGX1 an enclave needs to be created with its maximum memory demands
@@ -13,10 +13,11 @@ creation, but all pages need not be added before enclave initialization.
Pages can be added during enclave runtime.
Add support for dynamically adding pages to an initialized enclave,
-architecturally limited to RW permission. Add pages via the page fault
-handler at the time an enclave address without a backing enclave page
-is accessed, potentially directly reclaiming pages if no free pages
-are available.
+architecturally limited to RW permissions at creation but allowed to
+obtain RWX permissions after the enclave runs EMODPE. Add pages via the
+page fault handler at the time an enclave address without a backing
+enclave page is accessed, potentially directly reclaiming pages if
+no free pages are available.
The enclave is still required to run ENCLU[EACCEPT] on the page before
it can be used. A useful flow is for the enclave to run ENCLU[EACCEPT]
@@ -39,26 +40,17 @@ triggers this flow but the page will remain inaccessible (access will
result in #PF) until accepted from within the enclave via
ENCLU[EACCEPT].
-The page is added with the architecturally constrained RW permissions
-as runtime as well as maximum allowed permissions. It is understood that
-there are some use cases, for example code relocation, that requires RWX
-maximum permissions. Supporting these use cases require guidance from
-user space policy before such maximum permissions can be allowed.
-Integration with user policy is deferred.
-
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
---
- arch/x86/kernel/cpu/sgx/encl.c | 133 ++++++++++++++++++++++++++++++++
- arch/x86/kernel/cpu/sgx/encl.h | 2 +
- arch/x86/kernel/cpu/sgx/ioctl.c | 4 +-
- 3 files changed, 137 insertions(+), 2 deletions(-)
+ arch/x86/kernel/cpu/sgx/encl.c | 124 +++++++++++++++++++++++++++++++++
+ 1 file changed, 124 insertions(+)
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index cbd852fb760b..a5b1da1e5bd4 100644
+index 546423753e4c..fa4f947f8496 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -165,6 +165,128 @@ struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
- return entry;
+@@ -194,6 +194,119 @@ struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
+ return __sgx_encl_load_page(encl, entry);
}
+/**
@@ -82,29 +74,23 @@ index cbd852fb760b..a5b1da1e5bd4 100644
+ struct sgx_epc_page *epc_page;
+ struct sgx_va_page *va_page;
+ unsigned long phys_addr;
-+ unsigned long prot;
++ u64 secinfo_flags;
+ vm_fault_t vmret;
+ int ret;
+
+ if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
+ return VM_FAULT_SIGBUS;
+
-+ encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
-+ if (!encl_page)
-+ return VM_FAULT_OOM;
-+
-+ encl_page->desc = addr;
-+ encl_page->encl = encl;
-+
+ /*
-+ * Adding a regular page that is architecturally allowed to only
-+ * be created with RW permissions.
-+ * TBD: Interface with user space policy to support max permissions
-+ * of RWX.
++ * Ignore internal permission checking for dynamically added pages.
++ * They matter only for data added during the pre-initialization
++	 * phase. The enclave decides the permissions by means of
++ * EACCEPT, EACCEPTCOPY and EMODPE.
+ */
-+ prot = PROT_READ | PROT_WRITE;
-+ encl_page->vm_run_prot_bits = calc_vm_prot_bits(prot, 0);
-+ encl_page->vm_max_prot_bits = encl_page->vm_run_prot_bits;
++ secinfo_flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_X;
++ encl_page = sgx_encl_page_alloc(encl, addr - encl->base, secinfo_flags);
++ if (IS_ERR(encl_page))
++ return VM_FAULT_OOM;
+
+ epc_page = sgx_alloc_epc_page(encl_page, true);
+ if (IS_ERR(epc_page)) {
@@ -157,11 +143,8 @@ index cbd852fb760b..a5b1da1e5bd4 100644
+ /*
+ * Do not undo everything when creating PTE entry fails - next #PF
+ * would find page ready for a PTE.
-+ * PAGE_SHARED because protection is forced to be RW above and COW
-+ * is not supported.
+ */
-+ vmret = vmf_insert_pfn_prot(vma, addr, PFN_DOWN(phys_addr),
-+ PAGE_SHARED);
++ vmret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
+ if (vmret != VM_FAULT_NOPAGE) {
+ mutex_unlock(&encl->lock);
+ return VM_FAULT_SIGBUS;
@@ -186,7 +169,7 @@ index cbd852fb760b..a5b1da1e5bd4 100644
static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
{
unsigned long addr = (unsigned long)vmf->address;
-@@ -186,6 +308,17 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
+@@ -213,6 +326,17 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
if (unlikely(!encl))
return VM_FAULT_SIGBUS;
@@ -203,40 +186,7 @@ index cbd852fb760b..a5b1da1e5bd4 100644
+
mutex_lock(&encl->lock);
- entry = sgx_encl_load_page(encl, addr);
-diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
-index 848a28d28d3d..1b6ce1da7c92 100644
---- a/arch/x86/kernel/cpu/sgx/encl.h
-+++ b/arch/x86/kernel/cpu/sgx/encl.h
-@@ -123,4 +123,6 @@ void sgx_encl_free_epc_page(struct sgx_epc_page *page);
- struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
- unsigned long addr);
-
-+struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl);
-+void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page);
- #endif /* _X86_ENCL_H */
-diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
-index 23bdf558b231..58ff62a1fb00 100644
---- a/arch/x86/kernel/cpu/sgx/ioctl.c
-+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
-@@ -17,7 +17,7 @@
- #include "encl.h"
- #include "encls.h"
-
--static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
-+struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
- {
- struct sgx_va_page *va_page = NULL;
- void *err;
-@@ -43,7 +43,7 @@ static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
- return va_page;
- }
-
--static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
-+void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
- {
- encl->page_cnt--;
-
+ entry = sgx_encl_load_page_in_vma(encl, addr, vma->vm_flags);
--
2.35.1
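
The EACCEPT-driven flow in the commit message above is easiest to see from the enclave side. The sketch below is not taken from the series: the ENCLU leaf number and the SECINFO bit layout follow the Intel SDM, and the SECINFO describes the expected EPCM state of a freshly EAUG'd page (RW, page type PT_REG, PENDING set). The first EACCEPT of an unbacked enclave address faults, the kernel adds a page through the fault handler, and a subsequent EACCEPT accepts it for use.

#include <stdint.h>

#define ENCLU_EACCEPT		0x05		/* ENCLU leaf number (Intel SDM) */

#define SGX_SECINFO_R		(1ULL << 0)
#define SGX_SECINFO_W		(1ULL << 1)
#define SGX_SECINFO_PENDING	(1ULL << 3)
#define SGX_SECINFO_REG		(2ULL << 8)	/* page type PT_REG in bits 15:8 */

struct sgx_secinfo {
	uint64_t flags;
	uint8_t reserved[56];
} __attribute__((aligned(64)));

/* Must run inside the enclave; returns the ENCLU[EACCEPT] error code (0 on success). */
static inline uint64_t sgx_eaccept_new_page(void *addr)
{
	struct sgx_secinfo secinfo = {
		.flags = SGX_SECINFO_R | SGX_SECINFO_W |
			 SGX_SECINFO_REG | SGX_SECINFO_PENDING,
	};
	uint64_t rax;

	asm volatile(".byte 0x0f, 0x01, 0xd7"	/* ENCLU */
		     : "=a" (rax)
		     : "a" (ENCLU_EACCEPT), "b" (&secinfo), "c" (addr)
		     : "memory");
	return rax;
}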
diff --git a/0015-x86-sgx-Support-relaxing-of-enclave-page-permissions.patch b/0015-x86-sgx-Support-relaxing-of-enclave-page-permissions.patch
deleted file mode 100644
index c39ad552f928..000000000000
--- a/0015-x86-sgx-Support-relaxing-of-enclave-page-permissions.patch
+++ /dev/null
@@ -1,312 +0,0 @@
-From 3bdc7000cff598e3c065a103a62c12571300d5e6 Mon Sep 17 00:00:00 2001
-From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:37 -0800
-Subject: [PATCH 15/34] x86/sgx: Support relaxing of enclave page permissions
-
-In the initial (SGX1) version of SGX, pages in an enclave need to be
-created with permissions that support all usages of the pages, from
-the time the enclave is initialized until it is unloaded. For example,
-pages used by a JIT compiler or when code needs to otherwise be
-relocated need to always have RWX permissions.
-
-With the SGX2 function ENCLU[EMODPE] an enclave is able to relax
-the EPCM permissions of its pages after the enclave is initialized.
-Relaxing EPCM permissions is not possible from outside the enclave,
-including from the kernel. The kernel does control the PTEs though
-and the enclave still depends on the kernel to install PTEs with the
-new relaxed permissions before it (the enclave) can access the pages
-using the new permissions.
-
-Introduce ioctl() SGX_IOC_ENCLAVE_RELAX_PERMISSIONS to support
-relaxing of EPCM permissions done from within the enclave. With
-this ioctl() the user specifies a page range and the permissions to
-be applied to all pages in the provided range. After checking
-the new permissions (more detail below) the PTEs are reset and
-it is ensured that any new PTEs will contain the new, relaxed,
-permissions.
-
-The permission change request could fail on any page within the
-provided range. To support partial success the ioctl() returns
-an error code based on failures encountered by the kernel and
-the number of pages that were successfully changed.
-
-Checking user provided new permissions
-======================================
-
-Enclave page permission changes need to be approached with care and
-for this reason permission changes are only allowed if
-the new permissions are the same or more restrictive that the
-vetted permissions. Thus, even though an enclave is able to relax
-the EPCM permissions of its pages beyond what was originally vetted,
-the kernel will not. The kernel will only install PTEs that respect
-the vetted enclave page permissions.
-
-For example, enclave pages with vetted EPCM permissions in brackets
-below are allowed to have PTE permissions as follows:
-* (RWX) R => RW => RX => RWX
-* (RW) R => RW
-* (RX) R => RX
-
-Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
----
- arch/x86/include/uapi/asm/sgx.h | 19 +++
- arch/x86/kernel/cpu/sgx/ioctl.c | 199 ++++++++++++++++++++++++++++++++
- 2 files changed, 218 insertions(+)
-
-diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
-index f4b81587e90b..5c678b27bb72 100644
---- a/arch/x86/include/uapi/asm/sgx.h
-+++ b/arch/x86/include/uapi/asm/sgx.h
-@@ -29,6 +29,8 @@ enum sgx_page_flags {
- _IOW(SGX_MAGIC, 0x03, struct sgx_enclave_provision)
- #define SGX_IOC_VEPC_REMOVE_ALL \
- _IO(SGX_MAGIC, 0x04)
-+#define SGX_IOC_ENCLAVE_RELAX_PERMISSIONS \
-+ _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_relax_perm)
-
- /**
- * struct sgx_enclave_create - parameter structure for the
-@@ -76,6 +78,23 @@ struct sgx_enclave_provision {
- __u64 fd;
- };
-
-+/**
-+ * struct sgx_enclave_relax_perm - parameters for ioctl
-+ * %SGX_IOC_ENCLAVE_RELAX_PERMISSIONS
-+ * @offset: starting page offset (page aligned relative to enclave base
-+ * address defined in SECS)
-+ * @length: length of memory (multiple of the page size)
-+ * @secinfo: address for the SECINFO data containing the new permission bits
-+ * for pages in range described by @offset and @length
-+ * @count: (output) bytes successfully changed (multiple of page size)
-+ */
-+struct sgx_enclave_relax_perm {
-+ __u64 offset;
-+ __u64 length;
-+ __u64 secinfo;
-+ __u64 count;
-+};
-+
- struct sgx_enclave_run;
-
- /**
-diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
-index b8336d5d9029..9cc6af404bf6 100644
---- a/arch/x86/kernel/cpu/sgx/ioctl.c
-+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
-@@ -698,6 +698,202 @@ static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg)
- return sgx_set_attribute(&encl->attributes_mask, params.fd);
- }
-
-+static unsigned long vm_prot_from_secinfo(u64 secinfo_perm)
-+{
-+ unsigned long vm_prot;
-+
-+ vm_prot = _calc_vm_trans(secinfo_perm, SGX_SECINFO_R, PROT_READ) |
-+ _calc_vm_trans(secinfo_perm, SGX_SECINFO_W, PROT_WRITE) |
-+ _calc_vm_trans(secinfo_perm, SGX_SECINFO_X, PROT_EXEC);
-+ vm_prot = calc_vm_prot_bits(vm_prot, 0);
-+
-+ return vm_prot;
-+}
-+
-+/**
-+ * sgx_enclave_relax_perm() - Update OS after permissions relaxed by enclave
-+ * @encl: Enclave to which the pages belong.
-+ * @modp: Checked parameters from user on which pages need modifying.
-+ * @secinfo_perm: New validated permission bits.
-+ *
-+ * Return:
-+ * - 0: Success.
-+ * - -errno: Otherwise.
-+ */
-+static long sgx_enclave_relax_perm(struct sgx_encl *encl,
-+ struct sgx_enclave_relax_perm *modp,
-+ u64 secinfo_perm)
-+{
-+ struct sgx_encl_page *entry;
-+ unsigned long vm_prot;
-+ unsigned long addr;
-+ unsigned long c;
-+ int ret;
-+
-+ vm_prot = vm_prot_from_secinfo(secinfo_perm);
-+
-+ for (c = 0 ; c < modp->length; c += PAGE_SIZE) {
-+ addr = encl->base + modp->offset + c;
-+
-+ mutex_lock(&encl->lock);
-+
-+ entry = xa_load(&encl->page_array, PFN_DOWN(addr));
-+ if (!entry) {
-+ ret = -EFAULT;
-+ goto out_unlock;
-+ }
-+
-+ /*
-+ * Changing EPCM permissions is only supported on regular
-+ * SGX pages.
-+ */
-+ if (entry->type != SGX_PAGE_TYPE_REG) {
-+ ret = -EINVAL;
-+ goto out_unlock;
-+ }
-+
-+ /*
-+ * Do not accept permissions that are more relaxed
-+ * than vetted permissions.
-+ * If this check fails then EPCM permissions may be more
-+ * relaxed that what would be allowed by the kernel via
-+ * PTEs.
-+ */
-+ if ((entry->vm_max_prot_bits & vm_prot) != vm_prot) {
-+ ret = -EPERM;
-+ goto out_unlock;
-+ }
-+
-+ /*
-+ * Change runtime protection before zapping PTEs to ensure
-+ * any new #PF uses new permissions.
-+ */
-+ entry->vm_run_prot_bits = vm_prot;
-+
-+ mutex_unlock(&encl->lock);
-+ /*
-+ * Do not keep encl->lock because of dependency on
-+ * mmap_lock acquired in sgx_zap_enclave_ptes().
-+ */
-+ sgx_zap_enclave_ptes(encl, addr);
-+ }
-+
-+ ret = 0;
-+ goto out;
-+
-+out_unlock:
-+ mutex_unlock(&encl->lock);
-+out:
-+ modp->count = c;
-+
-+ return ret;
-+}
-+
-+/*
-+ * Ensure enclave is ready for SGX2 functions. Readiness is checked
-+ * by ensuring the hardware supports SGX2 and the enclave is initialized
-+ * and thus able to handle requests to modify pages within it.
-+ */
-+static int sgx_ioc_sgx2_ready(struct sgx_encl *encl)
-+{
-+ if (!(cpu_feature_enabled(X86_FEATURE_SGX2)))
-+ return -ENODEV;
-+
-+ if (!test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
-+ return -EINVAL;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Return valid permission fields from a secinfo structure provided by
-+ * user space. The secinfo structure is required to only have bits in
-+ * the permission fields set.
-+ */
-+static int sgx_perm_from_user_secinfo(void __user *_secinfo, u64 *secinfo_perm)
-+{
-+ struct sgx_secinfo secinfo;
-+ u64 perm;
-+
-+ if (copy_from_user(&secinfo, (void __user *)_secinfo,
-+ sizeof(secinfo)))
-+ return -EFAULT;
-+
-+ if (secinfo.flags & ~SGX_SECINFO_PERMISSION_MASK)
-+ return -EINVAL;
-+
-+ if (memchr_inv(secinfo.reserved, 0, sizeof(secinfo.reserved)))
-+ return -EINVAL;
-+
-+ perm = secinfo.flags & SGX_SECINFO_PERMISSION_MASK;
-+
-+ if ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R))
-+ return -EINVAL;
-+
-+ *secinfo_perm = perm;
-+
-+ return 0;
-+}
-+
-+/**
-+ * sgx_ioc_enclave_relax_perm() - handler for
-+ * %SGX_IOC_ENCLAVE_RELAX_PERMISSIONS
-+ * @encl: an enclave pointer
-+ * @arg: userspace pointer to a &struct sgx_enclave_relax_perm instance
-+ *
-+ * SGX2 distinguishes between relaxing and restricting the enclave page
-+ * permissions maintained by the hardware (EPCM permissions) of pages
-+ * belonging to an initialized enclave (after %SGX_IOC_ENCLAVE_INIT).
-+ *
-+ * EPCM permissions can be relaxed anytime directly from within the enclave
-+ * with no visibility from the kernel. This is accomplished with
-+ * ENCLU[EMODPE] run from within the enclave. Accessing pages with
-+ * the new, relaxed permissions requires the kernel to update the PTE
-+ * to handle the subsequent #PF correctly.
-+ *
-+ * Enclave page permissions are not allowed to exceed the
-+ * maximum vetted permissions maintained in
-+ * &struct sgx_encl_page->vm_max_prot_bits. If the enclave
-+ * exceeds these permissions by running ENCLU[EMODPE] from within the enclave
-+ * the kernel will prevent access to the pages via PTE and
-+ * VMA permissions.
-+ *
-+ * Return:
-+ * - 0: Success
-+ * - -errno: Otherwise
-+ */
-+static long sgx_ioc_enclave_relax_perm(struct sgx_encl *encl, void __user *arg)
-+{
-+ struct sgx_enclave_relax_perm params;
-+ u64 secinfo_perm;
-+ long ret;
-+
-+ ret = sgx_ioc_sgx2_ready(encl);
-+ if (ret)
-+ return ret;
-+
-+ if (copy_from_user(&params, arg, sizeof(params)))
-+ return -EFAULT;
-+
-+ if (sgx_validate_offset_length(encl, params.offset, params.length))
-+ return -EINVAL;
-+
-+ ret = sgx_perm_from_user_secinfo((void __user *)params.secinfo,
-+ &secinfo_perm);
-+ if (ret)
-+ return ret;
-+
-+ if (params.count)
-+ return -EINVAL;
-+
-+ ret = sgx_enclave_relax_perm(encl, &params, secinfo_perm);
-+
-+ if (copy_to_user(arg, &params, sizeof(params)))
-+ return -EFAULT;
-+
-+ return ret;
-+}
-+
- long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
- {
- struct sgx_encl *encl = filep->private_data;
-@@ -719,6 +915,9 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
- case SGX_IOC_ENCLAVE_PROVISION:
- ret = sgx_ioc_enclave_provision(encl, (void __user *)arg);
- break;
-+ case SGX_IOC_ENCLAVE_RELAX_PERMISSIONS:
-+ ret = sgx_ioc_enclave_relax_perm(encl, (void __user *)arg);
-+ break;
- default:
- ret = -ENOIOCTLCMD;
- break;
---
-2.35.1
-
diff --git a/0020-x86-sgx-Tighten-accessible-memory-range-after-enclav.patch b/0016-x86-sgx-Tighten-accessible-memory-range-after-enclav.patch
index 2381237df89c..313d0c99ac13 100644
--- a/0020-x86-sgx-Tighten-accessible-memory-range-after-enclav.patch
+++ b/0016-x86-sgx-Tighten-accessible-memory-range-after-enclav.patch
@@ -1,7 +1,7 @@
-From 94d96d463650dee683538bb1563a71afca7719cb Mon Sep 17 00:00:00 2001
+From ad4892ff45c6bb365cfc090572059bba52b79a78 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:42 -0800
-Subject: [PATCH 20/34] x86/sgx: Tighten accessible memory range after enclave
+Date: Mon, 13 Sep 2021 11:08:47 -0700
+Subject: [PATCH 16/30] x86/sgx: Tighten accessible memory range after enclave
initialization
Before an enclave is initialized the enclave's memory range is unknown.
@@ -37,10 +37,10 @@ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
1 file changed, 5 insertions(+)
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index a5b1da1e5bd4..5fe7189eac9d 100644
+index fa4f947f8496..7909570736a0 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -444,6 +444,11 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
+@@ -409,6 +409,11 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
XA_STATE(xas, &encl->page_array, PFN_DOWN(start));
diff --git a/0022-x86-sgx-Support-modifying-SGX-page-type.patch b/0017-x86-sgx-Support-modifying-SGX-page-type.patch
index eab8118013cb..bdba64106788 100644
--- a/0022-x86-sgx-Support-modifying-SGX-page-type.patch
+++ b/0017-x86-sgx-Support-modifying-SGX-page-type.patch
@@ -1,7 +1,7 @@
-From 88584ef777031604add6ed66b4d060ad9fce7cab Mon Sep 17 00:00:00 2001
+From 1d37a875597233c79bf09c64d310d75ebb4cb083 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:44 -0800
-Subject: [PATCH 22/34] x86/sgx: Support modifying SGX page type
+Date: Mon, 9 Aug 2021 14:03:54 -0700
+Subject: [PATCH 17/30] x86/sgx: Support modifying SGX page type
Every enclave contains one or more Thread Control Structures (TCS). The
TCS contains meta-data used by the hardware to save and restore thread
@@ -53,28 +53,28 @@ will encounter a page fault with SGX flag set in error code.
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
---
arch/x86/include/uapi/asm/sgx.h | 20 +++
- arch/x86/kernel/cpu/sgx/ioctl.c | 212 ++++++++++++++++++++++++++++++++
- 2 files changed, 232 insertions(+)
+ arch/x86/kernel/cpu/sgx/ioctl.c | 209 ++++++++++++++++++++++++++++++++
+ 2 files changed, 229 insertions(+)
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
-index b0ffb80bc67f..1df91517b612 100644
+index a0a24e94fb27..529f4ab28410 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
-@@ -33,6 +33,8 @@ enum sgx_page_flags {
- _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_relax_perm)
+@@ -31,6 +31,8 @@ enum sgx_page_flags {
+ _IO(SGX_MAGIC, 0x04)
#define SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS \
- _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_restrict_perm)
+ _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_restrict_permissions)
+#define SGX_IOC_ENCLAVE_MODIFY_TYPE \
-+ _IOWR(SGX_MAGIC, 0x07, struct sgx_enclave_modt)
++ _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_modify_type)
/**
* struct sgx_enclave_create - parameter structure for the
-@@ -116,6 +118,24 @@ struct sgx_enclave_restrict_perm {
+@@ -97,6 +99,24 @@ struct sgx_enclave_restrict_permissions {
__u64 count;
};
+/**
-+ * struct sgx_enclave_modt - parameters for %SGX_IOC_ENCLAVE_MODIFY_TYPE
++ * struct sgx_enclave_modify_type - parameters for %SGX_IOC_ENCLAVE_MODIFY_TYPE
+ * @offset: starting page offset (page aligned relative to enclave base
+ * address defined in SECS)
+ * @length: length of memory (multiple of the page size)
@@ -83,7 +83,7 @@ index b0ffb80bc67f..1df91517b612 100644
+ * @result: (output) SGX result code of ENCLS[EMODT] function
+ * @count: (output) bytes successfully changed (multiple of page size)
+ */
-+struct sgx_enclave_modt {
++struct sgx_enclave_modify_type {
+ __u64 offset;
+ __u64 length;
+ __u64 secinfo;
@@ -95,15 +95,15 @@ index b0ffb80bc67f..1df91517b612 100644
/**
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
-index 58ff62a1fb00..3f59920184c4 100644
+index 4d88bfd163e7..6f769e67ec2d 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
-@@ -1120,6 +1120,215 @@ static long sgx_ioc_enclave_restrict_perm(struct sgx_encl *encl,
+@@ -898,6 +898,212 @@ static long sgx_ioc_enclave_restrict_permissions(struct sgx_encl *encl,
return ret;
}
+/**
-+ * sgx_enclave_modt() - Modify type of SGX enclave pages
++ * sgx_enclave_modify_type() - Modify type of SGX enclave pages
+ * @encl: Enclave to which the pages belong.
+ * @modt: Checked parameters from user about which pages need modifying.
+ * @page_type: New page type.
@@ -112,11 +112,11 @@ index 58ff62a1fb00..3f59920184c4 100644
+ * - 0: Success
+ * - -errno: Otherwise
+ */
-+static long sgx_enclave_modt(struct sgx_encl *encl,
-+ struct sgx_enclave_modt *modt,
-+ enum sgx_page_type page_type)
++static long sgx_enclave_modify_type(struct sgx_encl *encl,
++ struct sgx_enclave_modify_type *modt,
++ enum sgx_page_type page_type)
+{
-+ unsigned long max_prot_restore, run_prot_restore;
++ unsigned long max_prot_restore;
+ struct sgx_encl_page *entry;
+ struct sgx_secinfo secinfo;
+ unsigned long prot;
@@ -160,7 +160,6 @@ index 58ff62a1fb00..3f59920184c4 100644
+ }
+
+ max_prot_restore = entry->vm_max_prot_bits;
-+ run_prot_restore = entry->vm_run_prot_bits;
+
+ /*
+ * Once a regular page becomes a TCS page it cannot be
@@ -178,7 +177,6 @@ index 58ff62a1fb00..3f59920184c4 100644
+ }
+ prot = PROT_READ | PROT_WRITE;
+ entry->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
-+ entry->vm_run_prot_bits = entry->vm_max_prot_bits;
+
+ /*
+ * Prevent page from being reclaimed while mutex
@@ -240,7 +238,6 @@ index 58ff62a1fb00..3f59920184c4 100644
+
+out_entry_changed:
+ entry->vm_max_prot_bits = max_prot_restore;
-+ entry->vm_run_prot_bits = run_prot_restore;
+out_unlock:
+ mutex_unlock(&encl->lock);
+out:
@@ -250,9 +247,9 @@ index 58ff62a1fb00..3f59920184c4 100644
+}
+
+/**
-+ * sgx_ioc_enclave_modt() - handler for %SGX_IOC_ENCLAVE_MODIFY_TYPE
++ * sgx_ioc_enclave_modify_type() - handler for %SGX_IOC_ENCLAVE_MODIFY_TYPE
+ * @encl: an enclave pointer
-+ * @arg: userspace pointer to a &struct sgx_enclave_modt instance
++ * @arg: userspace pointer to a &struct sgx_enclave_modify_type instance
+ *
+ * Ability to change the enclave page type supports the following use cases:
+ *
@@ -272,9 +269,9 @@ index 58ff62a1fb00..3f59920184c4 100644
+ * - 0: Success
+ * - -errno: Otherwise
+ */
-+static long sgx_ioc_enclave_modt(struct sgx_encl *encl, void __user *arg)
++static long sgx_ioc_enclave_modify_type(struct sgx_encl *encl, void __user *arg)
+{
-+ struct sgx_enclave_modt params;
++ struct sgx_enclave_modify_type params;
+ enum sgx_page_type page_type;
+ struct sgx_secinfo secinfo;
+ long ret;
@@ -303,7 +300,7 @@ index 58ff62a1fb00..3f59920184c4 100644
+ return -EINVAL;
+
+ page_type = (secinfo.flags & SGX_SECINFO_PAGE_TYPE_MASK) >> 8;
-+ ret = sgx_enclave_modt(encl, &params, page_type);
++ ret = sgx_enclave_modify_type(encl, &params, page_type);
+
+ if (copy_to_user(arg, &params, sizeof(params)))
+ return -EFAULT;
@@ -314,12 +311,12 @@ index 58ff62a1fb00..3f59920184c4 100644
long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct sgx_encl *encl = filep->private_data;
-@@ -1147,6 +1356,9 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
- case SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS:
- ret = sgx_ioc_enclave_restrict_perm(encl, (void __user *)arg);
+@@ -923,6 +1129,9 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+ ret = sgx_ioc_enclave_restrict_permissions(encl,
+ (void __user *)arg);
break;
+ case SGX_IOC_ENCLAVE_MODIFY_TYPE:
-+ ret = sgx_ioc_enclave_modt(encl, (void __user *)arg);
++ ret = sgx_ioc_enclave_modify_type(encl, (void __user *)arg);
+ break;
default:
ret = -ENOIOCTLCMD;
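
A companion userspace sketch for the type-modification ioctl(), again not from the series: it assumes the UAPI header from this series and mirrors the page-type encoding (type in bits 15:8 of secinfo.flags, PT_TRIM = 4) from the kernel-internal header. Changing pages to PT_TRIM is the usual first step of page removal; the permission bits are left zero because only the page type is being modified.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <asm/sgx.h>				/* UAPI from this patch series */

/* Mirrors the kernel-internal definitions (assumption, not UAPI). */
struct sgx_secinfo {
	uint64_t flags;
	uint8_t reserved[56];
} __attribute__((aligned(64)));

#define SGX_SECINFO_TRIM	(4ULL << 8)	/* page type PT_TRIM in bits 15:8 */

/* Mark a page range as PT_TRIM, e.g. in preparation for removing the pages. */
int sgx_trim_pages(int encl_fd, uint64_t offset, uint64_t length)
{
	struct sgx_enclave_modify_type params;
	struct sgx_secinfo secinfo;

	memset(&secinfo, 0, sizeof(secinfo));
	secinfo.flags = SGX_SECINFO_TRIM;	/* only the page type is set */

	memset(&params, 0, sizeof(params));	/* result and count are outputs */
	params.offset = offset;
	params.length = length;
	params.secinfo = (uint64_t)(uintptr_t)&secinfo;

	return ioctl(encl_fd, SGX_IOC_ENCLAVE_MODIFY_TYPE, &params);
}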
diff --git a/0023-x86-sgx-Support-complete-page-removal.patch b/0018-x86-sgx-Support-complete-page-removal.patch
index 57ab38265d99..8201a1788009 100644
--- a/0023-x86-sgx-Support-complete-page-removal.patch
+++ b/0018-x86-sgx-Support-complete-page-removal.patch
@@ -1,7 +1,7 @@
-From e8450696982167257dffebfbd8983e8d7b9bc235 Mon Sep 17 00:00:00 2001
+From 342f61fe55c7e9a5e981d1d1a5102ef8f1384322 Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:45 -0800
-Subject: [PATCH 23/34] x86/sgx: Support complete page removal
+Date: Wed, 8 Sep 2021 11:51:35 -0700
+Subject: [PATCH 18/30] x86/sgx: Support complete page removal
The SGX2 page removal flow was introduced in previous patch and is
as follows:
@@ -38,19 +38,19 @@ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
2 files changed, 166 insertions(+)
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
-index 1df91517b612..db969a2a1874 100644
+index 529f4ab28410..feda7f85b2ce 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
-@@ -35,6 +35,8 @@ enum sgx_page_flags {
- _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_restrict_perm)
+@@ -33,6 +33,8 @@ enum sgx_page_flags {
+ _IOWR(SGX_MAGIC, 0x05, struct sgx_enclave_restrict_permissions)
#define SGX_IOC_ENCLAVE_MODIFY_TYPE \
- _IOWR(SGX_MAGIC, 0x07, struct sgx_enclave_modt)
+ _IOWR(SGX_MAGIC, 0x06, struct sgx_enclave_modify_type)
+#define SGX_IOC_ENCLAVE_REMOVE_PAGES \
-+ _IOWR(SGX_MAGIC, 0x08, struct sgx_enclave_remove_pages)
++ _IOWR(SGX_MAGIC, 0x07, struct sgx_enclave_remove_pages)
/**
* struct sgx_enclave_create - parameter structure for the
-@@ -136,6 +138,25 @@ struct sgx_enclave_modt {
+@@ -117,6 +119,25 @@ struct sgx_enclave_modify_type {
__u64 count;
};
@@ -77,10 +77,10 @@ index 1df91517b612..db969a2a1874 100644
/**
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
-index 3f59920184c4..0ffb07095a80 100644
+index 6f769e67ec2d..515e1961cc02 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
-@@ -1329,6 +1329,148 @@ static long sgx_ioc_enclave_modt(struct sgx_encl *encl, void __user *arg)
+@@ -1104,6 +1104,148 @@ static long sgx_ioc_enclave_modify_type(struct sgx_encl *encl, void __user *arg)
return ret;
}
@@ -229,9 +229,9 @@ index 3f59920184c4..0ffb07095a80 100644
long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct sgx_encl *encl = filep->private_data;
-@@ -1359,6 +1501,9 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+@@ -1132,6 +1274,9 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
case SGX_IOC_ENCLAVE_MODIFY_TYPE:
- ret = sgx_ioc_enclave_modt(encl, (void __user *)arg);
+ ret = sgx_ioc_enclave_modify_type(encl, (void __user *)arg);
break;
+ case SGX_IOC_ENCLAVE_REMOVE_PAGES:
+ ret = sgx_ioc_enclave_remove_pages(encl, (void __user *)arg);
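
Tying the removal flow from the commit message together, a hedged userspace sketch follows: modify the page type to PT_TRIM, have the enclave ENCLU[EACCEPT] the change, then remove the pages. sgx_trim_pages() is the helper sketched earlier, enclave_accept_trimmed() is a hypothetical application-defined call that runs EACCEPT on every trimmed page, and struct sgx_enclave_remove_pages is assumed to carry offset, length and a count output as described above.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <asm/sgx.h>				/* UAPI from this patch series */

/* From the earlier SGX_IOC_ENCLAVE_MODIFY_TYPE sketch. */
int sgx_trim_pages(int encl_fd, uint64_t offset, uint64_t length);

/* Hypothetical: enter the enclave and ENCLU[EACCEPT] every trimmed page. */
int enclave_accept_trimmed(uint64_t offset, uint64_t length);

static int sgx_remove_range(int encl_fd, uint64_t offset, uint64_t length)
{
	struct sgx_enclave_remove_pages params;
	int ret;

	/* Step 1: change the page type to PT_TRIM. */
	ret = sgx_trim_pages(encl_fd, offset, length);
	if (ret)
		return ret;

	/* Step 2: the enclave acknowledges the change with EACCEPT. */
	ret = enclave_accept_trimmed(offset, length);
	if (ret)
		return ret;

	/* Step 3: free the EPC pages; params.count reports bytes removed. */
	memset(&params, 0, sizeof(params));
	params.offset = offset;
	params.length = length;

	return ioctl(encl_fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &params);
}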
diff --git a/0031-x86-sgx-Free-up-EPC-pages-directly-to-support-large-.patch b/0019-x86-sgx-Free-up-EPC-pages-directly-to-support-large-.patch
index e911fb8c4a11..6beecdd6a67f 100644
--- a/0031-x86-sgx-Free-up-EPC-pages-directly-to-support-large-.patch
+++ b/0019-x86-sgx-Free-up-EPC-pages-directly-to-support-large-.patch
@@ -1,7 +1,7 @@
-From be44ee6db36a464e6f9d2847c4c095ab5bbb65b2 Mon Sep 17 00:00:00 2001
+From f64beb4d3a1c793f928a4111daef0fee85f4733f Mon Sep 17 00:00:00 2001
From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:53 -0800
-Subject: [PATCH 31/34] x86/sgx: Free up EPC pages directly to support large
+Date: Thu, 23 Sep 2021 14:56:45 -0700
+Subject: [PATCH 19/30] x86/sgx: Free up EPC pages directly to support large
page ranges
The page reclaimer ensures availability of EPC pages across all
@@ -31,10 +31,10 @@ Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
3 files changed, 13 insertions(+)
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
-index 0ffb07095a80..d8c3c07badb3 100644
+index 515e1961cc02..f88bc1236276 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
-@@ -962,6 +962,8 @@ static long sgx_enclave_restrict_perm(struct sgx_encl *encl,
+@@ -777,6 +777,8 @@ sgx_enclave_restrict_permissions(struct sgx_encl *encl,
for (c = 0 ; c < modp->length; c += PAGE_SIZE) {
addr = encl->base + modp->offset + c;
@@ -43,7 +43,7 @@ index 0ffb07095a80..d8c3c07badb3 100644
mutex_lock(&encl->lock);
entry = sgx_encl_load_page(encl, addr);
-@@ -1156,6 +1158,8 @@ static long sgx_enclave_modt(struct sgx_encl *encl,
+@@ -934,6 +936,8 @@ static long sgx_enclave_modify_type(struct sgx_encl *encl,
for (c = 0 ; c < modt->length; c += PAGE_SIZE) {
addr = encl->base + modt->offset + c;
@@ -52,7 +52,7 @@ index 0ffb07095a80..d8c3c07badb3 100644
mutex_lock(&encl->lock);
entry = sgx_encl_load_page(encl, addr);
-@@ -1354,6 +1358,8 @@ static long sgx_encl_remove_pages(struct sgx_encl *encl,
+@@ -1129,6 +1133,8 @@ static long sgx_encl_remove_pages(struct sgx_encl *encl,
for (c = 0 ; c < params->length; c += PAGE_SIZE) {
addr = encl->base + params->offset + c;
@@ -62,10 +62,10 @@ index 0ffb07095a80..d8c3c07badb3 100644
entry = sgx_encl_load_page(encl, addr);
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
-index 1a3014aec490..a1cb7435932a 100644
+index 6e2cb7564080..545da16bb3ea 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
-@@ -378,6 +378,12 @@ static bool sgx_should_reclaim(unsigned long watermark)
+@@ -370,6 +370,12 @@ static bool sgx_should_reclaim(unsigned long watermark)
!list_empty(&sgx_active_page_list);
}
diff --git a/0024-Documentation-x86-Introduce-enclave-runtime-manageme.patch b/0024-Documentation-x86-Introduce-enclave-runtime-manageme.patch
deleted file mode 100644
index 381c703a49aa..000000000000
--- a/0024-Documentation-x86-Introduce-enclave-runtime-manageme.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From 20acb57bc235f289b2d083a408d134a0db676ed1 Mon Sep 17 00:00:00 2001
-From: Reinette Chatre <reinette.chatre@intel.com>
-Date: Mon, 7 Feb 2022 16:45:46 -0800
-Subject: [PATCH 24/34] Documentation/x86: Introduce enclave runtime management
- section
-
-Enclave runtime management is introduced following the pattern
-of the section describing enclave building. Provide a brief
-summary of enclave runtime management, pointing to the functions
-implementing the ioctl()s that will contain details within their
-kernel-doc.
-
-Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
----
- Documentation/x86/sgx.rst | 16 ++++++++++++++++
- 1 file changed, 16 insertions(+)
-
-diff --git a/Documentation/x86/sgx.rst b/Documentation/x86/sgx.rst
-index 9df620b59f83..4059efbb4d2e 100644
---- a/Documentation/x86/sgx.rst
-+++ b/Documentation/x86/sgx.rst
-@@ -138,6 +138,22 @@ pages and establish enclave page permissions.
- sgx_ioc_enclave_init
- sgx_ioc_enclave_provision
-
-+Enclave runtime management
-+--------------------------
-+
-+Systems supporting SGX2 additionally support changes to initialized
-+enclaves: modifying enclave page permissions and type, and dynamically
-+adding and removing of enclave pages. When an enclave accesses an address
-+within its address range that does not have a backing page then a new
-+regular page will be dynamically added to the enclave. The enclave is
-+still required to run EACCEPT on the new page before it can be used.
-+
-+.. kernel-doc:: arch/x86/kernel/cpu/sgx/ioctl.c
-+ :functions: sgx_ioc_enclave_relax_perm
-+ sgx_ioc_enclave_restrict_perm
-+ sgx_ioc_enclave_modt
-+ sgx_ioc_enclave_remove_pages
-+
- Enclave vDSO
- ------------
-
---
-2.35.1
-
diff --git a/0033-x86-sgx-Enable-PROT_EXEC-for-EAUG-d-pages.patch b/0033-x86-sgx-Enable-PROT_EXEC-for-EAUG-d-pages.patch
deleted file mode 100644
index 9285bc9a6c44..000000000000
--- a/0033-x86-sgx-Enable-PROT_EXEC-for-EAUG-d-pages.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From 48d8abe084ef10a653a831437f6fb5117051acec Mon Sep 17 00:00:00 2001
-From: Jarkko Sakkinen <jarkko@kernel.org>
-Date: Mon, 7 Mar 2022 17:28:42 +0200
-Subject: [PATCH 33/34] x86/sgx: Enable PROT_EXEC for EAUG'd pages
-
-vm_max_permissions was created to control the pre-initialization content
-that contributes to MRSIGNATURE. It was never meant to be as a limit to
-dynamically added pages.
-
-E.g. static content could be used as a hook for LSM's to decide whether
-certain signature is qualified for EINIT. Dynamic content has nothing to
-do with that. The current mechanisms only add to the complexity on how
-to control PTE and EPCM permissions, and do not add anything else than
-obfuscity to security side of things.
-
-Thus add PROT_EXEC to the permissions assigned by the #PF handler.
-
-Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
----
- arch/x86/kernel/cpu/sgx/encl.c | 9 ++++-----
- 1 file changed, 4 insertions(+), 5 deletions(-)
-
-diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index 5fe7189eac9d..cbafad786ff5 100644
---- a/arch/x86/kernel/cpu/sgx/encl.c
-+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -201,12 +201,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
- encl_page->encl = encl;
-
- /*
-- * Adding a regular page that is architecturally allowed to only
-- * be created with RW permissions.
-- * TBD: Interface with user space policy to support max permissions
-- * of RWX.
-+ * Dynamic pages do not contribute to MRSIGNATURE, i.e. they are
-+ * controlled only by PTE and EPCM permissions. Thus, the no limit
-+ * is set here.
- */
-- prot = PROT_READ | PROT_WRITE;
-+ prot = PROT_READ | PROT_WRITE | PROT_EXEC;
- encl_page->vm_run_prot_bits = calc_vm_prot_bits(prot, 0);
- encl_page->vm_max_prot_bits = encl_page->vm_run_prot_bits;
-
---
-2.35.1
-
diff --git a/0034-Revert-x86-sgx-x86-sgx-Add-sgx_encl_page-vm_run_prot.patch b/0034-Revert-x86-sgx-x86-sgx-Add-sgx_encl_page-vm_run_prot.patch
deleted file mode 100644
index 3c26d615cc6a..000000000000
--- a/0034-Revert-x86-sgx-x86-sgx-Add-sgx_encl_page-vm_run_prot.patch
+++ /dev/null
@@ -1,206 +0,0 @@
-From e144c0fc07a2a84a60ff35c25eaf39bc465dc7a2 Mon Sep 17 00:00:00 2001
-From: Jarkko Sakkinen <jarkko@kernel.org>
-Date: Fri, 11 Mar 2022 15:59:23 +0200
-Subject: [PATCH 34/34] Revert "x86/sgx: x86/sgx: Add
- sgx_encl_page->vm_run_prot_bits for dynamic permission changes"
-
-This reverts commit 730033a082775ac59f28c35c18aa39132fd4cfe9.
----
- Documentation/x86/sgx.rst | 10 ----------
- arch/x86/kernel/cpu/sgx/encl.c | 9 ++++-----
- arch/x86/kernel/cpu/sgx/encl.h | 3 +--
- arch/x86/kernel/cpu/sgx/ioctl.c | 33 ++++-----------------------------
- 4 files changed, 9 insertions(+), 46 deletions(-)
-
-diff --git a/Documentation/x86/sgx.rst b/Documentation/x86/sgx.rst
-index 4059efbb4d2e..6c66ce0ec69c 100644
---- a/Documentation/x86/sgx.rst
-+++ b/Documentation/x86/sgx.rst
-@@ -99,16 +99,6 @@ The relationships between the different permission masks are:
- * PTEs are installed to match the EPCM permissions, but not be more
- relaxed than the VMA permissions.
-
--During runtime the EPCM permissions of enclave pages belonging to an
--initialized enclave can change on systems supporting SGX2. In support
--of these runtime changes the kernel maintains (for each enclave page)
--the most permissive EPCM permission mask allowed by policy as
--the ``vm_max_prot_bits`` of that page. EPCM permissions are not allowed
--to be relaxed beyond ``vm_max_prot_bits``. The kernel also maintains
--the currently active EPCM permissions of an enclave page as its
--``vm_run_prot_bits`` to ensure PTEs and new VMAs respect the active
--EPCM permission values.
--
- On systems supporting SGX2 EPCM permissions may change while the
- enclave page belongs to a VMA without impacting the VMA permissions.
- This means that a running VMA may appear to allow access to an enclave
-diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index cbafad786ff5..fbb19ebe065c 100644
---- a/arch/x86/kernel/cpu/sgx/encl.c
-+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -206,8 +206,7 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
- * is set here.
- */
- prot = PROT_READ | PROT_WRITE | PROT_EXEC;
-- encl_page->vm_run_prot_bits = calc_vm_prot_bits(prot, 0);
-- encl_page->vm_max_prot_bits = encl_page->vm_run_prot_bits;
-+ encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
-
- epc_page = sgx_alloc_epc_page(encl_page, true);
- if (IS_ERR(epc_page)) {
-@@ -337,7 +336,7 @@ static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
- * exceed the VMA permissions.
- */
- vm_prot_bits = vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
-- page_prot_bits = entry->vm_run_prot_bits & vm_prot_bits;
-+ page_prot_bits = entry->vm_max_prot_bits & vm_prot_bits;
- /*
- * Add VM_SHARED so that PTE is made writable right away if VMA
- * and EPCM are writable (no COW in SGX).
-@@ -390,7 +389,7 @@ static vm_fault_t sgx_vma_pfn_mkwrite(struct vm_fault *vmf)
- goto out;
- }
-
-- if (!(entry->vm_run_prot_bits & VM_WRITE))
-+ if (!(entry->vm_max_prot_bits & VM_WRITE))
- ret = VM_FAULT_SIGBUS;
-
- out:
-@@ -458,7 +457,7 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
- mutex_lock(&encl->lock);
- xas_lock(&xas);
- xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
-- if (~page->vm_run_prot_bits & vm_prot_bits) {
-+ if (~page->vm_max_prot_bits & vm_prot_bits) {
- ret = -EACCES;
- break;
- }
-diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
-index 1b6ce1da7c92..47d4750b581f 100644
---- a/arch/x86/kernel/cpu/sgx/encl.h
-+++ b/arch/x86/kernel/cpu/sgx/encl.h
-@@ -27,8 +27,7 @@
-
- struct sgx_encl_page {
- unsigned long desc;
-- unsigned long vm_max_prot_bits:8;
-- unsigned long vm_run_prot_bits:8;
-+ unsigned long vm_max_prot_bits:16;
- enum sgx_page_type type:16;
- struct sgx_epc_page *epc_page;
- struct sgx_encl *encl;
-diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
-index d8c3c07badb3..9ce13a962483 100644
---- a/arch/x86/kernel/cpu/sgx/ioctl.c
-+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
-@@ -198,12 +198,6 @@ static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
- /* Calculate maximum of the VM flags for the page. */
- encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
-
-- /*
-- * At time of allocation, the runtime protection bits are the same
-- * as the maximum protection bits.
-- */
-- encl_page->vm_run_prot_bits = encl_page->vm_max_prot_bits;
--
- return encl_page;
- }
-
-@@ -764,12 +758,6 @@ static long sgx_enclave_relax_perm(struct sgx_encl *encl,
- goto out_unlock;
- }
-
-- /*
-- * Change runtime protection before zapping PTEs to ensure
-- * any new #PF uses new permissions.
-- */
-- entry->vm_run_prot_bits = vm_prot;
--
- mutex_unlock(&encl->lock);
- /*
- * Do not keep encl->lock because of dependency on
-@@ -946,9 +934,9 @@ static long sgx_enclave_restrict_perm(struct sgx_encl *encl,
- struct sgx_enclave_restrict_perm *modp,
- u64 secinfo_perm)
- {
-- unsigned long vm_prot, run_prot_restore;
- struct sgx_encl_page *entry;
- struct sgx_secinfo secinfo;
-+ unsigned long vm_prot;
- unsigned long addr;
- unsigned long c;
- void *epc_virt;
-@@ -1002,14 +990,6 @@ static long sgx_enclave_restrict_perm(struct sgx_encl *encl,
- goto out_unlock;
- }
-
-- /*
-- * Change runtime protection before zapping PTEs to ensure
-- * any new #PF uses new permissions. EPCM permissions (if
-- * needed) not changed yet.
-- */
-- run_prot_restore = entry->vm_run_prot_bits;
-- entry->vm_run_prot_bits = vm_prot;
--
- mutex_unlock(&encl->lock);
- /*
- * Do not keep encl->lock because of dependency on
-@@ -1033,12 +1013,12 @@ static long sgx_enclave_restrict_perm(struct sgx_encl *encl,
- pr_err_once("EMODPR encountered exception %d\n",
- ENCLS_TRAPNR(ret));
- ret = -EFAULT;
-- goto out_prot_restore;
-+ goto out_reclaim;
- }
- if (encls_failed(ret)) {
- modp->result = ret;
- ret = -EFAULT;
-- goto out_prot_restore;
-+ goto out_reclaim;
- }
-
- ret = sgx_enclave_etrack(encl);
-@@ -1054,8 +1034,6 @@ static long sgx_enclave_restrict_perm(struct sgx_encl *encl,
- ret = 0;
- goto out;
-
--out_prot_restore:
-- entry->vm_run_prot_bits = run_prot_restore;
- out_reclaim:
- sgx_mark_page_reclaimable(entry->epc_page);
- out_unlock:
-@@ -1136,7 +1114,7 @@ static long sgx_enclave_modt(struct sgx_encl *encl,
- struct sgx_enclave_modt *modt,
- enum sgx_page_type page_type)
- {
-- unsigned long max_prot_restore, run_prot_restore;
-+ unsigned long max_prot_restore;
- struct sgx_encl_page *entry;
- struct sgx_secinfo secinfo;
- unsigned long prot;
-@@ -1182,7 +1160,6 @@ static long sgx_enclave_modt(struct sgx_encl *encl,
- }
-
- max_prot_restore = entry->vm_max_prot_bits;
-- run_prot_restore = entry->vm_run_prot_bits;
-
- /*
- * Once a regular page becomes a TCS page it cannot be
-@@ -1200,7 +1177,6 @@ static long sgx_enclave_modt(struct sgx_encl *encl,
- }
- prot = PROT_READ | PROT_WRITE;
- entry->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);
-- entry->vm_run_prot_bits = entry->vm_max_prot_bits;
-
- /*
- * Prevent page from being reclaimed while mutex
-@@ -1262,7 +1238,6 @@ static long sgx_enclave_modt(struct sgx_encl *encl,
-
- out_entry_changed:
- entry->vm_max_prot_bits = max_prot_restore;
-- entry->vm_run_prot_bits = run_prot_restore;
- out_unlock:
- mutex_unlock(&encl->lock);
- out:
---
-2.35.1
-
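Side note on the reverted hunks above: after the revert, a page's single vm_max_prot_bits mask both caps what sgx_encl_may_map() will accept and, intersected with the VMA flags, decides the PTE protections installed by the fault handler. The stand-alone C sketch below is not kernel code; pte_prot() and may_map() are hypothetical helper names, and the VM_READ/VM_WRITE/VM_EXEC values are assumed to mirror the kernel's 0x1/0x2/0x4 encoding.

/*
 * Stand-alone illustration (not kernel code) of the permission checks
 * visible in the hunks above: PTE protections are the intersection of
 * the page's vm_max_prot_bits and the VMA flags, and a mapping is
 * refused if the VMA asks for a bit the page does not allow.
 */
#include <stdio.h>

#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

/* Protection bits a fault handler would install for this page/VMA pair. */
static unsigned long pte_prot(unsigned long vm_max_prot_bits,
                              unsigned long vma_flags)
{
        return vm_max_prot_bits & (vma_flags & (VM_READ | VM_WRITE | VM_EXEC));
}

/* Mirrors the sgx_encl_may_map() test: reject if the VMA exceeds the page. */
static int may_map(unsigned long vm_max_prot_bits, unsigned long vma_flags)
{
        unsigned long vm_prot_bits = vma_flags & (VM_READ | VM_WRITE | VM_EXEC);

        return (~vm_max_prot_bits & vm_prot_bits) ? 0 : 1;
}

int main(void)
{
        unsigned long page_max = VM_READ | VM_WRITE;        /* RW page */
        unsigned long vma_rw   = VM_READ | VM_WRITE;
        unsigned long vma_rwx  = VM_READ | VM_WRITE | VM_EXEC;

        printf("PTE prot under RW VMA: %#lx\n", pte_prot(page_max, vma_rw));
        printf("RWX VMA allowed:       %d\n", may_map(page_max, vma_rwx));
        return 0;
}

With an RW page, an RW VMA maps fine while an RWX VMA is refused, which corresponds to the -EACCES path in the sgx_encl_may_map() hunk shown above.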
diff --git a/0035-x86-sgx-Free-backing-memory-after-faulting-the-encla.patch b/0035-x86-sgx-Free-backing-memory-after-faulting-the-encla.patch
deleted file mode 100644
index f04a288aa7aa..000000000000
--- a/0035-x86-sgx-Free-backing-memory-after-faulting-the-encla.patch
+++ /dev/null
@@ -1,170 +0,0 @@
-From 4ab8c58f8e9d96e300dff48552f62caedbdac582 Mon Sep 17 00:00:00 2001
-From: Jarkko Sakkinen <jarkko@kernel.org>
-Date: Sat, 8 Jan 2022 16:05:10 +0200
-Subject: [PATCH] x86/sgx: Free backing memory after faulting the enclave page
-
-There is a limited amount of SGX memory (EPC) on each system. When that
-memory is used up, SGX has its own swapping mechanism which is similar
-in concept but totally separate from the core mm/* code. Instead of
-swapping to disk, SGX swaps from EPC to normal RAM. That normal RAM
-comes from a shared memory pseudo-file and can itself be swapped by the
-core mm code. There is a hierarchy like this:
-
- EPC <-> shmem <-> disk
-
-After data is swapped back in from shmem to EPC, the shmem backing
-storage needs to be freed. Currently, the backing shmem is not freed.
-This effectively wastes the shmem while the enclave is running. The
-memory is recovered when the enclave is destroyed and the backing
-storage freed.
-
-Sort this out by freeing the backing memory with shmem_truncate_range()
-as soon as a page is faulted back into the EPC. In addition, free the
-memory for PCMD pages as soon as all PCMDs in a page have been marked
-as unused by zeroing their contents.
-
-Reported-by: Dave Hansen <dave.hansen@linux.intel.com>
-Cc: stable@vger.kernel.org
-Fixes: 1728ab54b4be ("x86/sgx: Add a page reclaimer")
-Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
-
-v5:
-* Encapsulated file offset calculation for PCMD struct.
-* Replaced "magic number" PAGE_SIZE with sizeof(struct sgx_secs) to make
- the offset calculation more self-documenting.
-v4:
-* Sanitized the offset calculations.
-v3:
-* Resend.
-v2:
-* Rewrite commit message as proposed by Dave.
-* Truncate PCMD pages (Dave).
----
- arch/x86/kernel/cpu/sgx/encl.c | 57 ++++++++++++++++++++++++++++------
- 1 file changed, 48 insertions(+), 9 deletions(-)
-
-diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
-index 8be6f0592bdc..3d2ed8d27747 100644
---- a/arch/x86/kernel/cpu/sgx/encl.c
-+++ b/arch/x86/kernel/cpu/sgx/encl.c
-@@ -12,6 +12,30 @@
- #include "encls.h"
- #include "sgx.h"
-
-+/*
-+ * Calculate byte offset of a PCMD struct associated with an enclave page. PCMDs
-+ * follow right after the EPC data in the backing storage. In addition to the
-+ * visible enclave pages, there's one extra page slot for SECS, before PCMD
-+ * structs.
-+ */
-+static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl,
-+ unsigned long page_index)
-+{
-+ pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs);
-+
-+ return epc_end_off + page_index * sizeof(struct sgx_pcmd);
-+}
-+
-+/*
-+ * Free a page from the backing storage in the given page index.
-+ */
-+static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index)
-+{
-+ struct inode *inode = file_inode(encl->backing);
-+
-+ shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1);
-+}
-+
- /*
- * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
- * Pages" in the SDM.
-@@ -22,9 +46,11 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
- {
- unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
- struct sgx_encl *encl = encl_page->encl;
-+ pgoff_t page_index, page_pcmd_off;
- struct sgx_pageinfo pginfo;
- struct sgx_backing b;
-- pgoff_t page_index;
-+ bool pcmd_page_empty;
-+ u8 *pcmd_page;
- int ret;
-
- if (secs_page)
-@@ -32,14 +58,16 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
- else
- page_index = PFN_DOWN(encl->size);
-
-+ page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
-+
- ret = sgx_encl_lookup_backing(encl, page_index, &b);
- if (ret)
- return ret;
-
- pginfo.addr = encl_page->desc & PAGE_MASK;
- pginfo.contents = (unsigned long)kmap_atomic(b.contents);
-- pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
-- b.pcmd_offset;
-+ pcmd_page = kmap_atomic(b.pcmd);
-+ pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset;
-
- if (secs_page)
- pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
-@@ -55,11 +83,24 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
- ret = -EFAULT;
- }
-
-- kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
-+ memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
-+
-+ /*
-+ * The area for the PCMD in the page was zeroed above. Check if the
-+ * whole page is now empty, meaning that all PCMDs have been zeroed:
-+ */
-+ pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);
-+
-+ kunmap_atomic(pcmd_page);
- kunmap_atomic((void *)(unsigned long)pginfo.contents);
-
- sgx_encl_put_backing(&b, false);
-
-+ sgx_encl_truncate_backing_page(encl, page_index);
-+
-+ if (pcmd_page_empty)
-+ sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
-+
- return ret;
- }
-
-@@ -583,7 +624,7 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
- static int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
- struct sgx_backing *backing)
- {
-- pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
-+ pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
- struct page *contents;
- struct page *pcmd;
-
-@@ -591,7 +632,7 @@ static int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
- if (IS_ERR(contents))
- return PTR_ERR(contents);
-
-- pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
-+ pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off));
- if (IS_ERR(pcmd)) {
- put_page(contents);
- return PTR_ERR(pcmd);
-@@ -600,9 +641,7 @@ static int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
- backing->page_index = page_index;
- backing->contents = contents;
- backing->pcmd = pcmd;
-- backing->pcmd_offset =
-- (page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
-- sizeof(struct sgx_pcmd);
-+ backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);
-
- return 0;
- }
---
-2.35.1
-
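A stand-alone sketch of the PCMD offset arithmetic described in the removed 0035 patch above: PCMD entries follow the enclave's visible pages plus one SECS page slot in the backing shmem file, so the byte offset for page N is encl_size + sizeof(struct sgx_secs) + N * sizeof(struct sgx_pcmd). This is not the kernel helper from the patch; pcmd_byte_offset() is a hypothetical name, and the 4096/128-byte sizes are the architectural values assumed for illustration rather than taken from kernel headers.

/*
 * Illustration (user-space, not kernel code) of the PCMD layout used by
 * the removed 0035 patch: the backing file holds the enclave pages,
 * then one SECS slot, then the PCMD array.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define SECS_SIZE 4096UL   /* sizeof(struct sgx_secs), assumed */
#define PCMD_SIZE  128UL   /* sizeof(struct sgx_pcmd), 32 per page, assumed */

/* Byte offset of page_index's PCMD within the backing file. */
static unsigned long pcmd_byte_offset(unsigned long encl_size,
                                      unsigned long page_index)
{
        return encl_size + SECS_SIZE + page_index * PCMD_SIZE;
}

int main(void)
{
        unsigned long encl_size = 64 * PAGE_SIZE;  /* hypothetical enclave */
        unsigned long page_index = 40;
        unsigned long off = pcmd_byte_offset(encl_size, page_index);

        /* Backing page holding the PCMD and the offset inside that page,
         * i.e. what the patch computes with PFN_DOWN() and a page mask. */
        printf("pcmd backing page: %lu\n", off / PAGE_SIZE);
        printf("offset in page:    %lu\n", off % PAGE_SIZE);

        /* Pre-patch formulation: SECS slot plus 32 PCMDs per page. */
        printf("old pcmd index:    %lu\n",
               encl_size / PAGE_SIZE + 1 + (page_index >> 5));
        return 0;
}

For a 64-page enclave and page index 40, this lands in backing page 66 at offset 1024, which agrees with the pre-patch PFN_DOWN(encl->size) + 1 + (page_index >> 5) formulation replaced in the hunks above.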
diff --git a/PKGBUILD b/PKGBUILD
index 3c6850f42d06..62222016f288 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -22,26 +22,21 @@ source=(
0002-x86-sgx-Add-wrapper-for-SGX2-EMODPR-function.patch
0003-x86-sgx-Add-wrapper-for-SGX2-EMODT-function.patch
0004-x86-sgx-Add-wrapper-for-SGX2-EAUG-function.patch
- 0005-Documentation-x86-Document-SGX-permission-details.patch
- 0006-x86-sgx-Support-VMA-permissions-more-relaxed-than-en.patch
- 0007-x86-sgx-Add-pfn_mkwrite-handler-for-present-PTEs.patch
- 0008-x86-sgx-x86-sgx-Add-sgx_encl_page-vm_run_prot_bits-f.patch
- 0009-x86-sgx-Export-sgx_encl_ewb_cpumask.patch
- 0010-x86-sgx-Rename-sgx_encl_ewb_cpumask-as-sgx_encl_cpum.patch
- 0011-x86-sgx-Move-PTE-zap-code-to-new-sgx_zap_enclave_pte.patch
- 0012-x86-sgx-Make-sgx_ipi_cb-available-internally.patch
- 0013-x86-sgx-Create-utility-to-validate-user-provided-off.patch
- 0014-x86-sgx-Keep-record-of-SGX-page-type.patch
- 0015-x86-sgx-Support-relaxing-of-enclave-page-permissions.patch
- 0016-x86-sgx-Support-restricting-of-enclave-page-permissi.patch
- 0019-x86-sgx-Support-adding-of-pages-to-an-initialized-en.patch
- 0020-x86-sgx-Tighten-accessible-memory-range-after-enclav.patch
- 0022-x86-sgx-Support-modifying-SGX-page-type.patch
- 0023-x86-sgx-Support-complete-page-removal.patch
- 0024-Documentation-x86-Introduce-enclave-runtime-manageme.patch
- 0031-x86-sgx-Free-up-EPC-pages-directly-to-support-large-.patch
- 0033-x86-sgx-Enable-PROT_EXEC-for-EAUG-d-pages.patch
- 0034-Revert-x86-sgx-x86-sgx-Add-sgx_encl_page-vm_run_prot.patch
+ 0005-x86-sgx-Support-loading-enclave-page-without-VMA-per.patch
+ 0006-x86-sgx-Export-sgx_encl_ewb_cpumask.patch
+ 0007-x86-sgx-Rename-sgx_encl_ewb_cpumask-as-sgx_encl_cpum.patch
+ 0008-x86-sgx-Move-PTE-zap-code-to-new-sgx_zap_enclave_pte.patch
+ 0009-x86-sgx-Make-sgx_ipi_cb-available-internally.patch
+ 0010-x86-sgx-Create-utility-to-validate-user-provided-off.patch
+ 0011-x86-sgx-Keep-record-of-SGX-page-type.patch
+ 0012-x86-sgx-Export-sgx_encl_-grow-shrink.patch
+ 0013-x86-sgx-Export-sgx_encl_page_alloc.patch
+ 0014-x86-sgx-Support-restricting-of-enclave-page-permissi.patch
+ 0015-x86-sgx-Support-adding-of-pages-to-an-initialized-en.patch
+ 0016-x86-sgx-Tighten-accessible-memory-range-after-enclav.patch
+ 0017-x86-sgx-Support-modifying-SGX-page-type.patch
+ 0018-x86-sgx-Support-complete-page-removal.patch
+ 0019-x86-sgx-Free-up-EPC-pages-directly-to-support-large-.patch
)
validpgpkeys=(
'ABAF11C65A2970B130ABE3C479BE3E4300411886' # Linus Torvalds