@@ -403,11 +403,15 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
*/
ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
encl_page);
- if (ret)
+ if (ret) {
+	up_read(&current->mm->mmap_sem);
goto err_out_unlock;
+ }
ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
addp->src);
+	up_read(&current->mm->mmap_sem);
+
if (ret)
goto err_out;
@@ -427,7 +431,6 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
sgx_mark_page_reclaimable(encl_page->epc_page);
mutex_unlock(&encl->lock);
-	up_read(&current->mm->mmap_sem);
return ret;
err_out:
@@ -437,7 +440,6 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
err_out_unlock:
sgx_encl_shrink(encl, va_page);
mutex_unlock(&encl->lock);
-	up_read(&current->mm->mmap_sem);
err_out_free:
sgx_free_page(epc_page);
Drop mmap_sem, which needs to be held for read across EADD, prior to doing EEXTEND on the newly added page to avoid holding mmap_sem for an extended duration. EEXTEND doesn't access user pages and holding encl->lock without mmap_sem is perfectly ok, while EEXTEND is a _slow_ operation, to the point where it operates on 256-byte chunks instead of 4k pages to maintain a reasonable latency for a single instruction. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> --- arch/x86/kernel/cpu/sgx/ioctl.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-)