* [PATCH 1/4] x86/sgx: Ensure enclave state is visible before marking it created
2019-08-27 0:11 [PATCH 0/4] x86/sgx: Fix lock ordering bug w/ EADD Sean Christopherson
@ 2019-08-27 0:11 ` Sean Christopherson
2019-08-27 11:20 ` Jarkko Sakkinen
2019-08-27 0:11 ` [PATCH 2/4] x86/sgx: Preserved allowed attributes during SGX_IOC_ENCLAVE_CREATE Sean Christopherson
` (2 subsequent siblings)
3 siblings, 1 reply; 11+ messages in thread
From: Sean Christopherson @ 2019-08-27 0:11 UTC (permalink / raw)
To: Jarkko Sakkinen; +Cc: linux-sgx
Add a memory barrier pair to ensure all enclave state is visible in
memory prior to SGX_ENCL_CREATED being set. Without the barriers, adding
pages and/or initializing the enclave could theoretically consume stale
data.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
arch/x86/kernel/cpu/sgx/ioctl.c | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 911ff3b0f061..7134d68aecb3 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -163,6 +163,15 @@ static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
return encl_page;
}
+static bool is_encl_created(struct sgx_encl *encl)
+{
+ bool created = encl->flags & SGX_ENCL_CREATED;
+
+ /* Pairs with smp_wmb() in sgx_encl_create(). */
+ smp_rmb();
+ return created;
+}
+
static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
{
unsigned long encl_size = secs->size + PAGE_SIZE;
@@ -231,8 +240,9 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
/*
* Set SGX_ENCL_CREATED only after the enclave is fully prepped. This
* allows other flows to check if the enclave has been created without
- * taking encl->lock.
+ * taking encl->lock. Pairs with smp_rmb() in is_encl_created().
*/
+ smp_wmb();
encl->flags |= SGX_ENCL_CREATED;
mutex_unlock(&encl->lock);
@@ -478,7 +488,7 @@ static long sgx_ioc_enclave_add_page(struct file *filep, void __user *arg)
struct sgx_enclave_add_page addp;
struct sgx_secinfo secinfo;
- if (!(encl->flags & SGX_ENCL_CREATED))
+ if (!is_encl_created(encl))
return -EINVAL;
if (copy_from_user(&addp, arg, sizeof(addp)))
@@ -611,7 +621,7 @@ static long sgx_ioc_enclave_init(struct file *filep, void __user *arg)
struct page *initp_page;
int ret;
- if (!(encl->flags & SGX_ENCL_CREATED))
+ if (!is_encl_created(encl))
return -EINVAL;
if (copy_from_user(&einit, arg, sizeof(einit)))
--
2.22.0
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH 3/4] x86/sgx: Reject concurrent ioctls on single enclave
2019-08-27 0:11 [PATCH 0/4] x86/sgx: Fix lock ordering bug w/ EADD Sean Christopherson
2019-08-27 0:11 ` [PATCH 1/4] x86/sgx: Ensure enclave state is visible before marking it created Sean Christopherson
2019-08-27 0:11 ` [PATCH 2/4] x86/sgx: Preserved allowed attributes during SGX_IOC_ENCLAVE_CREATE Sean Christopherson
@ 2019-08-27 0:11 ` Sean Christopherson
2019-08-27 12:11 ` Jarkko Sakkinen
2019-08-27 0:11 ` [PATCH 4/4] x86/sgx: Take encl->lock inside of mm->mmap_sem for EADD Sean Christopherson
3 siblings, 1 reply; 11+ messages in thread
From: Sean Christopherson @ 2019-08-27 0:11 UTC (permalink / raw)
To: Jarkko Sakkinen; +Cc: linux-sgx
Except for ENCLAVE_SET_ATTRIBUTE, all SGX ioctls() must be executed
serially to successfully initialize an enclave, e.g. the kernel already
strictly requires ECREATE->EADD->EINIT, and concurrent EADDs will result
in an unstable MRENCLAVE. Explicitly enforce serialization by returning
EINVAL if userspace attempts an ioctl while a different ioctl for the
same enclave is in progress.
The primary beneficiary of explicit serialization is sgx_encl_grow() as
it no longer has to deal with dropping and reacquiring encl->lock when
a new VA page needs to be allocated. Eliminating the lock juggling in
sgx_encl_grow() paves the way for fixing a lock ordering bug in
ENCLAVE_ADD_PAGE without having to also juggle mm->mmap_sem.
Serializing ioctls also fixes a race between ENCLAVE_CREATE and
ENCLAVE_SET_ATTRIBUTE, as the latter does not take encl->lock, e.g.
concurrent updates to allowed_attributes could result in a stale
value. The race could also be fixed by taking encl->lock, but that
is less desirable as doing so would unnecessarily interfere with EPC
page reclaim.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
arch/x86/kernel/cpu/sgx/driver.c | 1 +
arch/x86/kernel/cpu/sgx/encl.h | 1 +
arch/x86/kernel/cpu/sgx/ioctl.c | 91 ++++++++++++++------------------
3 files changed, 43 insertions(+), 50 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c
index e740d71e2311..7ebd66050400 100644
--- a/arch/x86/kernel/cpu/sgx/driver.c
+++ b/arch/x86/kernel/cpu/sgx/driver.c
@@ -32,6 +32,7 @@ static int sgx_open(struct inode *inode, struct file *file)
return -ENOMEM;
kref_init(&encl->refcount);
+ atomic_set(&encl->in_ioctl, 0);
INIT_LIST_HEAD(&encl->va_pages);
INIT_RADIX_TREE(&encl->page_tree, GFP_KERNEL);
mutex_init(&encl->lock);
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index 37b5c4bcda7a..1505ff204703 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -67,6 +67,7 @@ struct sgx_encl_mm {
};
struct sgx_encl {
+ atomic_t in_ioctl;
unsigned int flags;
u64 secs_attributes;
u64 allowed_attributes;
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 103851babc75..170ed538b02b 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -14,8 +14,7 @@
#include <linux/suspend.h>
#include "driver.h"
-static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl,
- unsigned int disallowed_flags)
+static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
{
struct sgx_va_page *va_page = NULL;
void *err;
@@ -23,36 +22,20 @@ static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl,
BUILD_BUG_ON(SGX_VA_SLOT_COUNT !=
(SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1);
- if (encl->flags & disallowed_flags)
- return ERR_PTR(-EFAULT);
-
if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) {
- mutex_unlock(&encl->lock);
-
va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
- if (!va_page) {
- mutex_lock(&encl->lock);
+ if (!va_page)
return ERR_PTR(-ENOMEM);
- }
va_page->epc_page = sgx_alloc_va_page();
- mutex_lock(&encl->lock);
-
if (IS_ERR(va_page->epc_page)) {
err = ERR_CAST(va_page->epc_page);
kfree(va_page);
return err;
- } else if (encl->flags & disallowed_flags) {
- sgx_free_page(va_page->epc_page);
- kfree(va_page);
- return ERR_PTR(-EFAULT);
- } else if (encl->page_cnt % SGX_VA_SLOT_COUNT) {
- sgx_free_page(va_page->epc_page);
- kfree(va_page);
- va_page = NULL;
- } else {
- list_add(&va_page->list, &encl->va_pages);
}
+
+ WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT);
+ list_add(&va_page->list, &encl->va_pages);
}
encl->page_cnt++;
return va_page;
@@ -183,13 +166,12 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
struct file *backing;
long ret;
- mutex_lock(&encl->lock);
+ if (is_encl_created(encl))
+ return -EINVAL;
- va_page = sgx_encl_grow(encl, SGX_ENCL_CREATED | SGX_ENCL_DEAD);
- if (IS_ERR(va_page)) {
- ret = PTR_ERR(va_page);
- goto err_out_unlock;
- }
+ va_page = sgx_encl_grow(encl);
+ if (IS_ERR(va_page))
+ return PTR_ERR(va_page);
ssaframesize = sgx_calc_ssaframesize(secs->miscselect, secs->xfrm);
if (sgx_validate_secs(secs, ssaframesize)) {
@@ -239,13 +221,12 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
/*
* Set SGX_ENCL_CREATED only after the enclave is fully prepped. This
- * allows other flows to check if the enclave has been created without
- * taking encl->lock. Pairs with smp_rmb() in is_encl_created().
+ * allows setting and checking enclave creation without having to take
+ * encl->lock. Pairs with smp_rmb() in is_encl_created().
*/
smp_wmb();
encl->flags |= SGX_ENCL_CREATED;
- mutex_unlock(&encl->lock);
return 0;
err_out:
@@ -259,8 +240,6 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
err_out_shrink:
sgx_encl_shrink(encl, va_page);
-err_out_unlock:
- mutex_unlock(&encl->lock);
return ret;
}
@@ -280,9 +259,8 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
* 0 on success,
* -errno otherwise
*/
-static long sgx_ioc_enclave_create(struct file *filep, void __user *arg)
+static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
{
- struct sgx_encl *encl = filep->private_data;
struct sgx_enclave_create ecreate;
struct page *secs_page;
struct sgx_secs *secs;
@@ -414,14 +392,14 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
return PTR_ERR(epc_page);
}
- mutex_lock(&encl->lock);
-
- va_page = sgx_encl_grow(encl, SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD);
+ va_page = sgx_encl_grow(encl);
if (IS_ERR(va_page)) {
ret = PTR_ERR(va_page);
goto err_out_free;
}
+ mutex_lock(&encl->lock);
+
ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
encl_page);
if (ret)
@@ -440,13 +418,13 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
PFN_DOWN(encl_page->desc));
err_out_shrink:
+ mutex_unlock(&encl->lock);
sgx_encl_shrink(encl, va_page);
err_out_free:
sgx_free_page(epc_page);
kfree(encl_page);
- mutex_unlock(&encl->lock);
return ret;
}
@@ -482,9 +460,8 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
* -ENOMEM if any memory allocation, including EPC, fails,
* -errno otherwise
*/
-static long sgx_ioc_enclave_add_page(struct file *filep, void __user *arg)
+static long sgx_ioc_enclave_add_page(struct sgx_encl *encl, void __user *arg)
{
- struct sgx_encl *encl = filep->private_data;
struct sgx_enclave_add_page addp;
struct sgx_secinfo secinfo;
@@ -612,9 +589,8 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
* SGX error code on EINIT failure,
* -errno otherwise
*/
-static long sgx_ioc_enclave_init(struct file *filep, void __user *arg)
+static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
{
- struct sgx_encl *encl = filep->private_data;
struct sgx_einittoken *einittoken;
struct sgx_sigstruct *sigstruct;
struct sgx_enclave_init einit;
@@ -668,9 +644,9 @@ static long sgx_ioc_enclave_init(struct file *filep, void __user *arg)
*
* Return: 0 on success, -errno otherwise
*/
-static long sgx_ioc_enclave_set_attribute(struct file *filep, void __user *arg)
+static long sgx_ioc_enclave_set_attribute(struct sgx_encl *encl,
+ void __user *arg)
{
- struct sgx_encl *encl = filep->private_data;
struct sgx_enclave_set_attribute params;
struct file *attribute_file;
int ret;
@@ -697,16 +673,31 @@ static long sgx_ioc_enclave_set_attribute(struct file *filep, void __user *arg)
long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
+ struct sgx_encl *encl = filep->private_data;
+ int ret;
+
+ if (!atomic_add_unless(&encl->in_ioctl, 1, 1))
+ return -EINVAL;
+
switch (cmd) {
case SGX_IOC_ENCLAVE_CREATE:
- return sgx_ioc_enclave_create(filep, (void __user *)arg);
+ ret = sgx_ioc_enclave_create(encl, (void __user *)arg);
+ break;
case SGX_IOC_ENCLAVE_ADD_PAGE:
- return sgx_ioc_enclave_add_page(filep, (void __user *)arg);
+ ret = sgx_ioc_enclave_add_page(encl, (void __user *)arg);
+ break;
case SGX_IOC_ENCLAVE_INIT:
- return sgx_ioc_enclave_init(filep, (void __user *)arg);
+ ret = sgx_ioc_enclave_init(encl, (void __user *)arg);
+ break;
case SGX_IOC_ENCLAVE_SET_ATTRIBUTE:
- return sgx_ioc_enclave_set_attribute(filep, (void __user *)arg);
+ ret = sgx_ioc_enclave_set_attribute(encl, (void __user *)arg);
+ break;
default:
- return -ENOIOCTLCMD;
+ ret = -ENOIOCTLCMD;
+ break;
}
+
+ atomic_dec(&encl->in_ioctl);
+
+ return ret;
}
--
2.22.0
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH 4/4] x86/sgx: Take encl->lock inside of mm->mmap_sem for EADD
2019-08-27 0:11 [PATCH 0/4] x86/sgx: Fix lock ordering bug w/ EADD Sean Christopherson
` (2 preceding siblings ...)
2019-08-27 0:11 ` [PATCH 3/4] x86/sgx: Reject concurrent ioctls on single enclave Sean Christopherson
@ 2019-08-27 0:11 ` Sean Christopherson
2019-08-27 12:21 ` Jarkko Sakkinen
3 siblings, 1 reply; 11+ messages in thread
From: Sean Christopherson @ 2019-08-27 0:11 UTC (permalink / raw)
To: Jarkko Sakkinen; +Cc: linux-sgx
Reverse the order in which encl->lock and mm->mmap_sem are taken during
ENCLAVE_ADD_PAGE so as to adhere to SGX's lock ordering requirements.
Attempting to acquire mm->mmap_sem while holding encl->lock can result
in deadlock.
Refactor EEXTEND and the final bookkeeping out of __sgx_encl_add_page()
so that mm->mmap_sem can be dropped after EADD without spreading the
lock/unlock across multiple functions.
Reported-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
arch/x86/kernel/cpu/sgx/ioctl.c | 55 ++++++++++++++++++++-------------
1 file changed, 33 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 170ed538b02b..4a9ae1090433 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -317,43 +317,40 @@ static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
static int __sgx_encl_add_page(struct sgx_encl *encl,
struct sgx_encl_page *encl_page,
struct sgx_epc_page *epc_page,
- struct sgx_secinfo *secinfo, unsigned long src,
- unsigned long mrmask)
+ struct sgx_secinfo *secinfo, unsigned long src)
{
struct sgx_pageinfo pginfo;
struct vm_area_struct *vma;
int ret;
- int i;
pginfo.secs = (unsigned long)sgx_epc_addr(encl->secs.epc_page);
pginfo.addr = SGX_ENCL_PAGE_ADDR(encl_page);
pginfo.metadata = (unsigned long)secinfo;
pginfo.contents = src;
- down_read(¤t->mm->mmap_sem);
-
/* Query vma's VM_MAYEXEC as an indirect path_noexec() check. */
if (encl_page->vm_max_prot_bits & VM_EXEC) {
vma = find_vma(current->mm, src);
- if (!vma) {
- up_read(¤t->mm->mmap_sem);
+ if (!vma)
return -EFAULT;
- }
- if (!(vma->vm_flags & VM_MAYEXEC)) {
- up_read(¤t->mm->mmap_sem);
+ if (!(vma->vm_flags & VM_MAYEXEC))
return -EACCES;
- }
}
__uaccess_begin();
ret = __eadd(&pginfo, sgx_epc_addr(epc_page));
__uaccess_end();
- up_read(¤t->mm->mmap_sem);
+ return ret ? -EFAULT : 0;
+}
- if (ret)
- return -EFAULT;
+static int __sgx_encl_extend(struct sgx_encl *encl,
+ struct sgx_epc_page *epc_page,
+ unsigned long mrmask)
+{
+ int ret;
+ int i;
for_each_set_bit(i, &mrmask, 16) {
ret = __eextend(sgx_epc_addr(encl->secs.epc_page),
@@ -364,12 +361,6 @@ static int __sgx_encl_add_page(struct sgx_encl *encl,
return -EFAULT;
}
}
-
- encl_page->encl = encl;
- encl_page->epc_page = epc_page;
- encl->secs_child_cnt++;
- sgx_mark_page_reclaimable(encl_page->epc_page);
-
return 0;
}
@@ -398,19 +389,39 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
goto err_out_free;
}
+ down_read(¤t->mm->mmap_sem);
+
mutex_lock(&encl->lock);
+ /*
+ * Insert prior to EADD in case of OOM. EADD modifies MRENCLAVE, i.e.
+ * can't be gracefully unwound, while failure on EADD/EXTEND is limited
+ * to userspace errors (or kernel/hardware bugs).
+ */
ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
encl_page);
- if (ret)
+ if (ret) {
+ up_read(¤t->mm->mmap_sem);
goto err_out_shrink;
+ }
ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
- addp->src, addp->mrmask);
+ addp->src);
+ up_read(¤t->mm->mmap_sem);
+
+ if (ret)
+ goto err_out;
+
+ ret = __sgx_encl_extend(encl, epc_page, addp->mrmask);
if (ret)
goto err_out;
+ encl_page->encl = encl;
+ encl_page->epc_page = epc_page;
+ encl->secs_child_cnt++;
+ sgx_mark_page_reclaimable(encl_page->epc_page);
mutex_unlock(&encl->lock);
+
return 0;
err_out:
--
2.22.0
^ permalink raw reply related [flat|nested] 11+ messages in thread