@@ -517,24 +517,22 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
 			return ret;
 	}
 
-	mutex_lock(&encl->lock);
-
-	va_page = sgx_encl_grow(encl, SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD);
-	if (IS_ERR(va_page)) {
-		ret = PTR_ERR(va_page);
-		goto err_out_unlock;
-	}
-
 	encl_page = sgx_encl_page_alloc(encl, addr, prot, page_type);
-	if (IS_ERR(encl_page)) {
-		ret = PTR_ERR(encl_page);
-		goto err_out_shrink;
+	if (IS_ERR(encl_page))
+		return PTR_ERR(encl_page);
+
+	mutex_lock(&encl->lock);
+
+	va_page = sgx_encl_grow(encl, SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD);
+	if (IS_ERR(va_page)) {
+		ret = PTR_ERR(va_page);
+		goto err_out_free;
 	}
 
 	ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
 				encl_page);
 	if (ret)
-		goto err_out_free;
+		goto err_out_shrink;
 
 	ret = __sgx_encl_add_page(encl, encl_page, data, secinfo, mrmask);
 	if (ret)
@@ -546,13 +544,12 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
 err_out:
 	radix_tree_delete(&encl_page->encl->page_tree,
 			  PFN_DOWN(encl_page->desc));
+err_out_shrink:
+	sgx_encl_shrink(encl, va_page);
+
 err_out_free:
 	kfree(encl_page);
 
-err_out_shrink:
-	sgx_encl_shrink(encl, va_page);
-
-err_out_unlock:
 	mutex_unlock(&encl->lock);
 	return ret;
 }
Refactor sgx_encl_add_page() to allocate the encl_page prior to taking
encl->lock so that the encl_page can be used to allocate its associated
EPC page without having to drop and retake encl->lock. Removal of the
add page worker will move EPC page allocation to sgx_encl_add_page().

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/driver/ioctl.c | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)
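
To make the ordering change easier to see outside the kernel sources, here is a
minimal user-space sketch of the same pattern: allocate the page descriptor
before taking the lock, do the grow and tree-insert steps under the lock, and
unwind in reverse order via gotos on failure. The enclave/page_desc types and
the enclave_grow()/enclave_shrink()/enclave_track_page() helpers are
hypothetical stand-ins invented for this sketch, not the SGX driver API; only
the lock/allocation ordering and the label layout mirror the patch.

/*
 * Sketch of the reordered add-page flow: allocate outside the lock,
 * fail fast before locking, unwind in reverse order under the lock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct page_desc { unsigned long addr; };

struct enclave {
	pthread_mutex_t lock;
	int va_slots;			/* stand-in for VA-page bookkeeping */
	struct page_desc *pages[16];	/* stand-in for the page radix tree */
	unsigned int nr_pages;
};

static int enclave_grow(struct enclave *encl)
{
	encl->va_slots++;		/* pretend a VA page was reserved */
	return 0;
}

static void enclave_shrink(struct enclave *encl)
{
	encl->va_slots--;		/* undo enclave_grow() */
}

static int enclave_track_page(struct enclave *encl, struct page_desc *page)
{
	if (encl->nr_pages >= 16)
		return -ENOSPC;		/* stand-in for insertion failing */
	encl->pages[encl->nr_pages++] = page;
	return 0;
}

static int enclave_add_page(struct enclave *encl, unsigned long addr)
{
	struct page_desc *page;
	int ret;

	/*
	 * Allocate before taking the lock, as the patch does; a failure
	 * here returns directly, with nothing to unwind and no lock held.
	 */
	page = malloc(sizeof(*page));
	if (!page)
		return -ENOMEM;
	page->addr = addr;

	pthread_mutex_lock(&encl->lock);

	ret = enclave_grow(encl);
	if (ret)
		goto err_out_free;

	ret = enclave_track_page(encl, page);
	if (ret)
		goto err_out_shrink;

	pthread_mutex_unlock(&encl->lock);
	return 0;

	/* Unwind in reverse order of setup, mirroring the reordered labels. */
err_out_shrink:
	enclave_shrink(encl);
err_out_free:
	free(page);
	pthread_mutex_unlock(&encl->lock);
	return ret;
}

int main(void)
{
	struct enclave encl = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("add page: %d\n", enclave_add_page(&encl, 0x1000));
	return 0;
}

As in the patch, the only work left outside the lock is the descriptor
allocation itself, which is what lets a later change perform the EPC page
allocation inside sgx_encl_add_page() without dropping and retaking encl->lock.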