| Message ID | 1490040002-31158-1-git-send-email-sean.j.christopherson@intel.com (mailing list archive) |
| --- | --- |
| State | New, archived |
On Mon, Mar 20, 2017 at 01:00:02PM -0700, Sean Christopherson wrote:
> Remove sgx_pin_mm and sgx_unpin_mm and instead directly call down_read
> and up_read on encl->mm->mmap_sem. Add a check for SGX_ENCL_DEAD in
> sgx_isolate_pages as it previously relied on sgx_pin_mm to check for
> the enclave being dead. The other code in sgx_(un)pin_mm was either
> redundant and/or unnecessary.
>
> As both consumers of sgx_pin_mm lock the encl after sgx_pin_mm, it is
> more accurate to check SGX_ENCL_DEAD after the consumer locks the encl.
> Locking the enclave for the purposes of checking SGX_ENCL_DEAD is not
> functionally necessary when acquiring mmap_sem.
>
> Incrementing mm_count when locking encl->mm->mmap_sem is unnecessary
> as mm_count is incremented when encl->mmu_notifier is registered and
> is not decremented until the notifier is released in sgx_encl_release.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

Again, from a quick skim this looks good, but I have to postpone reviewing it
a bit as the current behavior does not cause a regression.

/Jarkko
On Mon, Mar 20, 2017 at 01:00:02PM -0700, Sean Christopherson wrote:
> Remove sgx_pin_mm and sgx_unpin_mm and instead directly call down_read
> and up_read on encl->mm->mmap_sem. Add a check for SGX_ENCL_DEAD in
> sgx_isolate_pages as it previously relied on sgx_pin_mm to check for
> the enclave being dead. The other code in sgx_(un)pin_mm was either
> redundant and/or unnecessary.
>
> As both consumers of sgx_pin_mm lock the encl after sgx_pin_mm, it is
> more accurate to check SGX_ENCL_DEAD after the consumer locks the encl.
> Locking the enclave for the purposes of checking SGX_ENCL_DEAD is not
> functionally necessary when acquiring mmap_sem.

These first two paragraphs are merely describing what is obvious by reading
the code.

> Incrementing mm_count when locking encl->mm->mmap_sem is unnecessary
> as mm_count is incremented when encl->mmu_notifier is registered and
> is not decremented until the notifier is released in sgx_encl_release.

This justifies the change!

> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

I'll test this and squash it. Thanks.

/Jarkko
diff --git a/drivers/platform/x86/intel_sgx.h b/drivers/platform/x86/intel_sgx.h
index adb5b17..a4c8265 100644
--- a/drivers/platform/x86/intel_sgx.h
+++ b/drivers/platform/x86/intel_sgx.h
@@ -217,8 +217,6 @@ int sgx_eremove(struct sgx_epc_page *epc_page);
 struct vm_area_struct *sgx_find_vma(struct sgx_encl *encl, unsigned long addr);
 void sgx_zap_tcs_ptes(struct sgx_encl *encl,
 		      struct vm_area_struct *vma);
-bool sgx_pin_mm(struct sgx_encl *encl);
-void sgx_unpin_mm(struct sgx_encl *encl);
 void sgx_invalidate(struct sgx_encl *encl);
 int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
 		  struct vm_area_struct **vma);
diff --git a/drivers/platform/x86/intel_sgx_ioctl.c b/drivers/platform/x86/intel_sgx_ioctl.c
index e0e2f14..32739cf 100644
--- a/drivers/platform/x86/intel_sgx_ioctl.c
+++ b/drivers/platform/x86/intel_sgx_ioctl.c
@@ -220,10 +220,7 @@ static bool sgx_process_add_page_req(struct sgx_add_page_req *req)
 	if (IS_ERR(epc_page))
 		return false;
 
-	if (!sgx_pin_mm(encl)) {
-		sgx_free_page(epc_page, encl, 0);
-		return false;
-	}
+	down_read(&encl->mm->mmap_sem);
 
 	mutex_lock(&encl->lock);
 
@@ -271,12 +268,12 @@ static bool sgx_process_add_page_req(struct sgx_add_page_req *req)
 	list_add_tail(&encl_page->load_list, &encl->load_list);
 
 	mutex_unlock(&encl->lock);
-	sgx_unpin_mm(encl);
+	up_read(&encl->mm->mmap_sem);
 	return true;
 out:
 	sgx_free_page(epc_page, encl, 0);
 	mutex_unlock(&encl->lock);
-	sgx_unpin_mm(encl);
+	up_read(&encl->mm->mmap_sem);
 	return false;
 }
 
diff --git a/drivers/platform/x86/intel_sgx_page_cache.c b/drivers/platform/x86/intel_sgx_page_cache.c
index e4f6b95..35dbf8d 100644
--- a/drivers/platform/x86/intel_sgx_page_cache.c
+++ b/drivers/platform/x86/intel_sgx_page_cache.c
@@ -195,6 +195,9 @@ static void sgx_isolate_pages(struct sgx_encl *encl,
 
 	mutex_lock(&encl->lock);
 
+	if (encl->flags & SGX_ENCL_DEAD)
+		goto out;
+
 	for (i = 0; i < nr_to_scan; i++) {
 		if (list_empty(&encl->load_list))
 			break;
@@ -211,7 +214,7 @@ static void sgx_isolate_pages(struct sgx_encl *encl,
 			list_move_tail(&entry->load_list,
 				       &encl->load_list);
 		}
 	}
-
+out:
 	mutex_unlock(&encl->lock);
 }
 
@@ -392,14 +395,11 @@ static void sgx_swap_pages(unsigned long nr_to_scan)
 	if (!encl)
 		goto out;
 
-	if (!sgx_pin_mm(encl))
-		goto out_enclave;
-
+	down_read(&encl->mm->mmap_sem);
 	sgx_isolate_pages(encl, &cluster, nr_to_scan);
 	sgx_write_pages(encl, &cluster);
-	sgx_unpin_mm(encl);
+	up_read(&encl->mm->mmap_sem);
 
-out_enclave:
 	kref_put(&encl->refcount, sgx_encl_release);
 out:
 	kref_put(&ctx->refcount, sgx_tgid_ctx_release);
diff --git a/drivers/platform/x86/intel_sgx_util.c b/drivers/platform/x86/intel_sgx_util.c
index 234a5fb..2671f00 100644
--- a/drivers/platform/x86/intel_sgx_util.c
+++ b/drivers/platform/x86/intel_sgx_util.c
@@ -147,33 +147,6 @@ void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma)
 	}
 }
 
-bool sgx_pin_mm(struct sgx_encl *encl)
-{
-	mutex_lock(&encl->lock);
-	if (encl->flags & SGX_ENCL_DEAD) {
-		mutex_unlock(&encl->lock);
-		return false;
-	}
-
-	atomic_inc(&encl->mm->mm_count);
-	mutex_unlock(&encl->lock);
-
-	down_read(&encl->mm->mmap_sem);
-
-	if (encl->flags & SGX_ENCL_DEAD) {
-		sgx_unpin_mm(encl);
-		return false;
-	}
-
-	return true;
-}
-
-void sgx_unpin_mm(struct sgx_encl *encl)
-{
-	up_read(&encl->mm->mmap_sem);
-	mmdrop(encl->mm);
-}
-
 void sgx_invalidate(struct sgx_encl *encl)
 {
 	struct vm_area_struct *vma;
Remove sgx_pin_mm and sgx_unpin_mm and instead directly call down_read
and up_read on encl->mm->mmap_sem. Add a check for SGX_ENCL_DEAD in
sgx_isolate_pages as it previously relied on sgx_pin_mm to check for
the enclave being dead. The other code in sgx_(un)pin_mm was either
redundant and/or unnecessary.

As both consumers of sgx_pin_mm lock the encl after sgx_pin_mm, it is
more accurate to check SGX_ENCL_DEAD after the consumer locks the encl.
Locking the enclave for the purposes of checking SGX_ENCL_DEAD is not
functionally necessary when acquiring mmap_sem.

Incrementing mm_count when locking encl->mm->mmap_sem is unnecessary
as mm_count is incremented when encl->mmu_notifier is registered and
is not decremented until the notifier is released in sgx_encl_release.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 drivers/platform/x86/intel_sgx.h            |  2 --
 drivers/platform/x86/intel_sgx_ioctl.c      |  9 +++------
 drivers/platform/x86/intel_sgx_page_cache.c | 12 ++++++------
 drivers/platform/x86/intel_sgx_util.c       | 27 ---------------------------
 4 files changed, 9 insertions(+), 41 deletions(-)
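To make the mm_count argument in the last paragraph easier to follow, here is
a minimal C sketch. It is not the driver's actual code: the example_* names,
the notifier-ops wiring, and the call sites are assumptions made for
illustration; only mmu_notifier_register()/mmu_notifier_unregister(),
down_read()/up_read(), and the encl->mm / encl->mmu_notifier fields come from
the patch context. The point is that registering the MMU notifier already
takes a reference on mm->mm_count that is held until the notifier is torn
down, so the reclaim path can take mmap_sem directly without a separate
atomic_inc(&mm->mm_count)/mmdrop() pair.

/* Sketch only: example_* identifiers are hypothetical, not from the driver. */
#include <linux/mm_types.h>
#include <linux/mmu_notifier.h>
#include <linux/rwsem.h>

struct example_encl {
	struct mm_struct *mm;
	struct mmu_notifier mmu_notifier;
};

static void example_mmu_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* The real driver would mark the enclave SGX_ENCL_DEAD here. */
}

static const struct mmu_notifier_ops example_mmu_ops = {
	.release = example_mmu_release,
};

static int example_encl_bind_mm(struct example_encl *encl, struct mm_struct *mm)
{
	encl->mm = mm;
	encl->mmu_notifier.ops = &example_mmu_ops;
	/* Registration takes a reference on mm->mm_count ... */
	return mmu_notifier_register(&encl->mmu_notifier, mm);
}

static void example_encl_release(struct example_encl *encl)
{
	/* ... and that reference is dropped only when the notifier goes away. */
	mmu_notifier_unregister(&encl->mmu_notifier, encl->mm);
}

static void example_reclaim(struct example_encl *encl)
{
	/*
	 * Safe without an extra atomic_inc()/mmdrop() pair: the notifier
	 * registration keeps the mm_struct pinned (mm_count > 0) for the
	 * whole lifetime of the enclave.
	 */
	down_read(&encl->mm->mmap_sem);
	/* ... isolate and write back enclave pages ... */
	up_read(&encl->mm->mmap_sem);
}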