@@ -307,43 +307,40 @@ static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
static int __sgx_encl_add_page(struct sgx_encl *encl,
struct sgx_encl_page *encl_page,
struct sgx_epc_page *epc_page,
- struct sgx_secinfo *secinfo, unsigned long src,
- unsigned long mrmask)
+ struct sgx_secinfo *secinfo, unsigned long src)
{
struct sgx_pageinfo pginfo;
struct vm_area_struct *vma;
int ret;
- int i;
pginfo.secs = (unsigned long)sgx_epc_addr(encl->secs.epc_page);
pginfo.addr = SGX_ENCL_PAGE_ADDR(encl_page);
pginfo.metadata = (unsigned long)secinfo;
pginfo.contents = src;
- down_read(&current->mm->mmap_sem);
-
/* Query vma's VM_MAYEXEC as an indirect path_noexec() check. */
if (encl_page->vm_max_prot_bits & VM_EXEC) {
vma = find_vma(current->mm, src);
- if (!vma) {
- up_read(&current->mm->mmap_sem);
+ if (!vma)
return -EFAULT;
- }
- if (!(vma->vm_flags & VM_MAYEXEC)) {
- up_read(&current->mm->mmap_sem);
+ if (!(vma->vm_flags & VM_MAYEXEC))
return -EACCES;
- }
}
__uaccess_begin();
ret = __eadd(&pginfo, sgx_epc_addr(epc_page));
__uaccess_end();
- up_read(&current->mm->mmap_sem);
+ return ret ? -EFAULT : 0;
+}
- if (ret)
- return -EFAULT;
+static int __sgx_encl_extend(struct sgx_encl *encl,
+ struct sgx_epc_page *epc_page,
+ unsigned long mrmask)
+{
+ int ret;
+ int i;
for_each_set_bit(i, &mrmask, 16) {
ret = __eextend(sgx_epc_addr(encl->secs.epc_page),
@@ -354,12 +351,6 @@ static int __sgx_encl_add_page(struct sgx_encl *encl,
return -EFAULT;
}
}
-
- encl_page->encl = encl;
- encl_page->epc_page = epc_page;
- encl->secs_child_cnt++;
- sgx_mark_page_reclaimable(encl_page->epc_page);
-
return 0;
}
@@ -388,19 +379,39 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
goto err_out_free;
}
+ down_read(&current->mm->mmap_sem);
+
mutex_lock(&encl->lock);
+ /*
+ * Insert prior to EADD in case of OOM. EADD modifies MRENCLAVE, i.e.
+ * can't be gracefully unwound, while failure on EADD/EXTEND is limited
+ * to userspace errors (or kernel/hardware bugs).
+ */
ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
encl_page);
- if (ret)
+ if (ret) {
+ up_read(&current->mm->mmap_sem);
goto err_out_shrink;
+ }
ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
- addp->src, addp->mrmask);
+ addp->src);
+ up_read(&current->mm->mmap_sem);
+
+ if (ret)
+ goto err_out;
+
+ ret = __sgx_encl_extend(encl, epc_page, addp->mrmask);
if (ret)
goto err_out;
+ encl_page->encl = encl;
+ encl_page->epc_page = epc_page;
+ encl->secs_child_cnt++;
+ sgx_mark_page_reclaimable(encl_page->epc_page);
mutex_unlock(&encl->lock);
+
return 0;
err_out:
Reverse the order in which encl->lock and mm->mmap_sem are taken during ENCLAVE_ADD_PAGE so as to adhere to SGX's lock ordering requirements. Refactor EEXTEND and the final bookeeping out of __sgx_encl_add_page() so that mm->mmap_sem can be dropped after EADD without spreading the lock/unlock across multiple functions. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> --- arch/x86/kernel/cpu/sgx/ioctl.c | 55 ++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 22 deletions(-)