From: Sean Christopherson <sean.j.christopherson@intel.com>
To: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
Cc: linux-sgx@vger.kernel.org
Subject: [PATCH 4/4] x86/sgx: Take encl->lock inside of mm->mmap_sem for EADD
Date: Mon, 26 Aug 2019 17:11:28 -0700
Message-ID: <20190827001128.25066-5-sean.j.christopherson@intel.com>
In-Reply-To: <20190827001128.25066-1-sean.j.christopherson@intel.com>

Reverse the order in which encl->lock and mm->mmap_sem are taken during
SGX_IOC_ENCLAVE_ADD_PAGE so as to adhere to SGX's lock ordering
requirements.  Attempting to acquire mm->mmap_sem while holding
encl->lock can result in deadlock.
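
Concretely, the nesting flips from taking encl->lock first and acquiring
mm->mmap_sem inside it, to the reverse; a rough before/after sketch
(all other details elided):

	/* Before: mmap_sem nested inside encl->lock (deadlock-prone). */
	mutex_lock(&encl->lock);
	down_read(&current->mm->mmap_sem);

	/* After: encl->lock nested inside mmap_sem. */
	down_read(&current->mm->mmap_sem);
	mutex_lock(&encl->lock);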

Refactor EEXTEND and the final bookkeeping out of __sgx_encl_add_page()
so that mm->mmap_sem can be dropped after EADD without spreading the
lock/unlock across multiple functions.
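
With that split, the flow in sgx_encl_add_page() ends up roughly as
follows (error handling and the page_tree insertion elided for brevity):

	down_read(&current->mm->mmap_sem);
	mutex_lock(&encl->lock);

	/* EADD; needs mmap_sem for the find_vma()/VM_MAYEXEC check. */
	ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
				  addp->src);
	up_read(&current->mm->mmap_sem);

	/* EEXTEND and the final bookkeeping only need encl->lock. */
	ret = __sgx_encl_extend(encl, epc_page, addp->mrmask);
	...
	mutex_unlock(&encl->lock);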

Reported-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/ioctl.c | 55 ++++++++++++++++++++-------------
 1 file changed, 33 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 170ed538b02b..4a9ae1090433 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -317,43 +317,40 @@ static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
 static int __sgx_encl_add_page(struct sgx_encl *encl,
 			       struct sgx_encl_page *encl_page,
 			       struct sgx_epc_page *epc_page,
-			       struct sgx_secinfo *secinfo, unsigned long src,
-			       unsigned long mrmask)
+			       struct sgx_secinfo *secinfo, unsigned long src)
 {
 	struct sgx_pageinfo pginfo;
 	struct vm_area_struct *vma;
 	int ret;
-	int i;
 
 	pginfo.secs = (unsigned long)sgx_epc_addr(encl->secs.epc_page);
 	pginfo.addr = SGX_ENCL_PAGE_ADDR(encl_page);
 	pginfo.metadata = (unsigned long)secinfo;
 	pginfo.contents = src;
 
-	down_read(&current->mm->mmap_sem);
-
 	/* Query vma's VM_MAYEXEC as an indirect path_noexec() check. */
 	if (encl_page->vm_max_prot_bits & VM_EXEC) {
 		vma = find_vma(current->mm, src);
-		if (!vma) {
-			up_read(&current->mm->mmap_sem);
+		if (!vma)
 			return -EFAULT;
-		}
 
-		if (!(vma->vm_flags & VM_MAYEXEC)) {
-			up_read(&current->mm->mmap_sem);
+		if (!(vma->vm_flags & VM_MAYEXEC))
 			return -EACCES;
-		}
 	}
 
 	__uaccess_begin();
 	ret = __eadd(&pginfo, sgx_epc_addr(epc_page));
 	__uaccess_end();
 
-	up_read(&current->mm->mmap_sem);
+	return ret ? -EFAULT : 0;
+}
 
-	if (ret)
-		return -EFAULT;
+static int __sgx_encl_extend(struct sgx_encl *encl,
+			     struct sgx_epc_page *epc_page,
+			     unsigned long mrmask)
+{
+	int ret;
+	int i;
 
 	for_each_set_bit(i, &mrmask, 16) {
 		ret = __eextend(sgx_epc_addr(encl->secs.epc_page),
@@ -364,12 +361,6 @@ static int __sgx_encl_add_page(struct sgx_encl *encl,
 			return -EFAULT;
 		}
 	}
-
-	encl_page->encl = encl;
-	encl_page->epc_page = epc_page;
-	encl->secs_child_cnt++;
-	sgx_mark_page_reclaimable(encl_page->epc_page);
-
 	return 0;
 }
 
@@ -398,19 +389,39 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
 		goto err_out_free;
 	}
 
+	down_read(&current->mm->mmap_sem);
+
 	mutex_lock(&encl->lock);
 
+	/*
+	 * Insert prior to EADD in case of OOM.  EADD modifies MRENCLAVE, i.e.
+	 * can't be gracefully unwound, while failure on EADD/EXTEND is limited
+	 * to userspace errors (or kernel/hardware bugs).
+	 */
 	ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
 				encl_page);
-	if (ret)
+	if (ret) {
+		up_read(&current->mm->mmap_sem);
 		goto err_out_shrink;
+	}
 
 	ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
-				  addp->src, addp->mrmask);
+				  addp->src);
+	up_read(&current->mm->mmap_sem);
+
+	if (ret)
+		goto err_out;
+
+	ret = __sgx_encl_extend(encl, epc_page, addp->mrmask);
 	if (ret)
 		goto err_out;
 
+	encl_page->encl = encl;
+	encl_page->epc_page = epc_page;
+	encl->secs_child_cnt++;
+	sgx_mark_page_reclaimable(encl_page->epc_page);
 	mutex_unlock(&encl->lock);
+
 	return 0;
 
 err_out:
-- 
2.22.0

