Message ID | 20200406205626.33264-1-jarkko.sakkinen@linux.intel.com
---|---
State | New, archived
Series | [v4] x86/sgx: Fix deadlock and race conditions between fork() and EPC reclaim
On Mon, Apr 06, 2020 at 11:56:26PM +0300, Jarkko Sakkinen wrote:
> From: Sean Christopherson <sean.j.christopherson@intel.com>

>          spin_lock(&encl->mm_lock);
> +
>          list_add_rcu(&encl_mm->list, &encl->mm_list);
> -        spin_unlock(&encl->mm_lock);
>
> -        synchronize_srcu(&encl->srcu);
> +        /* Even if the CPU does not reorder writes, a compiler might. */

The preferred (by maintainers) style of comment for smp_wmb()/smp_rmb()
comments is to explicitly call out the associated reader/writer. If you
want to go with a minimal comment, my vote is for something like:

        /*
         * Add to list before updating version. Pairs with the smp_rmb() in
         * sgx_reclaimer_block().
         */

And if you want to go really spartan, I'd take:

        /* Pairs with smp_rmb() in sgx_reclaimer_block(). */

over a generic comment about the compiler reordering instructions.

> +        smp_wmb();
> +        encl->mm_list_version++;
> +
> +        spin_unlock(&encl->mm_lock);
>
>          return 0;
>  }
> diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
> index 44b353aa8866..f0f72e591244 100644
> --- a/arch/x86/kernel/cpu/sgx/encl.h
> +++ b/arch/x86/kernel/cpu/sgx/encl.h
> @@ -74,6 +74,7 @@ struct sgx_encl {
>          struct mutex lock;
>          struct list_head mm_list;
>          spinlock_t mm_lock;
> +        unsigned long mm_list_version;
>          struct file *backing;
>          struct kref refcount;
>          struct srcu_struct srcu;
> diff --git a/arch/x86/kernel/cpu/sgx/reclaim.c b/arch/x86/kernel/cpu/sgx/reclaim.c
> index 39f0ddefbb79..5e089f0db201 100644
> --- a/arch/x86/kernel/cpu/sgx/reclaim.c
> +++ b/arch/x86/kernel/cpu/sgx/reclaim.c
> @@ -184,28 +184,39 @@ static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
>          struct sgx_encl_page *page = epc_page->owner;
>          unsigned long addr = SGX_ENCL_PAGE_ADDR(page);
>          struct sgx_encl *encl = page->encl;
> +        unsigned long mm_list_version;
>          struct sgx_encl_mm *encl_mm;
>          struct vm_area_struct *vma;
>          int idx, ret;
>
> -        idx = srcu_read_lock(&encl->srcu);
> +        do {
> +                mm_list_version = encl->mm_list_version;
>
> -        list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
> -                if (!mmget_not_zero(encl_mm->mm))
> -                        continue;
> +                /*
> +                 * Fence the read. This guarantees that we don't mutate the old
> +                 * list with a new version.
> +                 */

As above, would prefer something like:

        /*
         * Read the version before walking the list. Pairs with the
         * smp_wmb() in sgx_encl_mm_add().
         */

or just

        /* Pairs with the smp_wmb() in sgx_encl_mm_add(). */

> +                smp_rmb();
>
> -                down_read(&encl_mm->mm->mmap_sem);
> +                idx = srcu_read_lock(&encl->srcu);
>
> -                ret = sgx_encl_find(encl_mm->mm, addr, &vma);
> -                if (!ret && encl == vma->vm_private_data)
> -                        zap_vma_ptes(vma, addr, PAGE_SIZE);
> +                list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
> +                        if (!mmget_not_zero(encl_mm->mm))
> +                                continue;
>
> -                up_read(&encl_mm->mm->mmap_sem);
> +                        down_read(&encl_mm->mm->mmap_sem);
>
> -                mmput_async(encl_mm->mm);
> -        }
> +                        ret = sgx_encl_find(encl_mm->mm, addr, &vma);
> +                        if (!ret && encl == vma->vm_private_data)
> +                                zap_vma_ptes(vma, addr, PAGE_SIZE);
>
> -        srcu_read_unlock(&encl->srcu, idx);
> +                        up_read(&encl_mm->mm->mmap_sem);
> +
> +                        mmput_async(encl_mm->mm);
> +                }
> +
> +                srcu_read_unlock(&encl->srcu, idx);
> +        } while (unlikely(encl->mm_list_version != mm_list_version));
>
>          mutex_lock(&encl->lock);
>
> @@ -250,6 +261,11 @@ static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
>          struct sgx_encl_mm *encl_mm;
>          int idx;
>
> +        /*
> +         * Can race with sgx_encl_mm_add(), but ETRACK has already been
> +         * executed, which means that the CPUs running in the new mm will enter
> +         * into the enclave with a fresh epoch.
> +         */
>          cpumask_clear(cpumask);
>
>          idx = srcu_read_lock(&encl->srcu);
> --
> 2.25.1
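As an aside for readers less familiar with the smp_wmb()/smp_rmb() pairing being debated above, the ordering contract can be sketched outside the kernel. The snippet below is only a rough userspace analogue, not the SGX code: the names (example_add(), example_walk(), struct node, list_head, list_version, list_lock) are made up for illustration, C11 fences stand in for smp_wmb()/smp_rmb(), a pthread mutex stands in for mm_lock, and an atomic head pointer stands in for the RCU-protected list. The writer publishes the new element and only then bumps the version counter behind a release fence; the reader samples the version, issues an acquire fence before walking the list, and retries the walk if the version has moved, mirroring the retry loop in sgx_reclaimer_block().

/* Build with: cc -std=c11 -pthread list_version_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node {
        struct node *next;
        int val;
};

static struct node *_Atomic list_head;  /* stands in for encl->mm_list */
static atomic_ulong list_version;       /* stands in for encl->mm_list_version */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer: publish the node first, then bump the version. */
static void example_add(struct node *n)
{
        pthread_mutex_lock(&list_lock);

        n->next = atomic_load_explicit(&list_head, memory_order_relaxed);
        atomic_store_explicit(&list_head, n, memory_order_release);

        /* Pairs with the acquire fence in example_walk(). */
        atomic_thread_fence(memory_order_release);
        atomic_fetch_add_explicit(&list_version, 1, memory_order_relaxed);

        pthread_mutex_unlock(&list_lock);
}

/* Reader: sample the version, walk the list, retry if the version moved. */
static void example_walk(void)
{
        unsigned long version;
        struct node *n;

        do {
                version = atomic_load_explicit(&list_version, memory_order_relaxed);

                /* Pairs with the release fence in example_add(). */
                atomic_thread_fence(memory_order_acquire);

                n = atomic_load_explicit(&list_head, memory_order_acquire);
                while (n) {
                        printf("%d\n", n->val);
                        n = n->next;
                }
        } while (version != atomic_load_explicit(&list_version, memory_order_relaxed));
}

int main(void)
{
        static struct node a = { .val = 1 };

        example_add(&a);
        example_walk();
        return 0;
}

The point of the pairing is that a reader that observes the new version is also guaranteed to observe the list element published before it; a reader that raced with the writer simply goes around the loop once more.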
On Mon, Apr 13, 2020 at 09:32:34PM -0700, Sean Christopherson wrote:
> On Mon, Apr 06, 2020 at 11:56:26PM +0300, Jarkko Sakkinen wrote:
> > From: Sean Christopherson <sean.j.christopherson@intel.com>
> >          spin_lock(&encl->mm_lock);
> > +
> >          list_add_rcu(&encl_mm->list, &encl->mm_list);
> > -        spin_unlock(&encl->mm_lock);
> >
> > -        synchronize_srcu(&encl->srcu);
> > +        /* Even if the CPU does not reorder writes, a compiler might. */
>
> The preferred (by maintainers) style of comment for smp_wmb()/smp_rmb()
> comments is to explicitly call out the associated reader/writer. If you
> want to go with a minimal comment, my vote is for something like:
>
>         /*
>          * Add to list before updating version. Pairs with the smp_rmb() in
>          * sgx_reclaimer_block().
>          */
>
> And if you want to go really spartan, I'd take:
>
>         /* Pairs with smp_rmb() in sgx_reclaimer_block(). */
>
> over a generic comment about the compiler reordering instructions.

Thanks Sean, makes sense, I'll go with your "spartan" suggestion.

/Jarkko
On Tue, Apr 14, 2020 at 10:17:49AM +0300, Jarkko Sakkinen wrote:
> On Mon, Apr 13, 2020 at 09:32:34PM -0700, Sean Christopherson wrote:
> > On Mon, Apr 06, 2020 at 11:56:26PM +0300, Jarkko Sakkinen wrote:
> > > From: Sean Christopherson <sean.j.christopherson@intel.com>
> > >          spin_lock(&encl->mm_lock);
> > > +
> > >          list_add_rcu(&encl_mm->list, &encl->mm_list);
> > > -        spin_unlock(&encl->mm_lock);
> > >
> > > -        synchronize_srcu(&encl->srcu);
> > > +        /* Even if the CPU does not reorder writes, a compiler might. */
> >
> > The preferred (by maintainers) style of comment for smp_wmb()/smp_rmb()
> > comments is to explicitly call out the associated reader/writer. If you
> > want to go with a minimal comment, my vote is for something like:
> >
> >         /*
> >          * Add to list before updating version. Pairs with the smp_rmb() in
> >          * sgx_reclaimer_block().
> >          */
> >
> > And if you want to go really spartan, I'd take:
> >
> >         /* Pairs with smp_rmb() in sgx_reclaimer_block(). */
> >
> > over a generic comment about the compiler reordering instructions.
>
> Thanks Sean, makes sense, I'll go with your "spartan" suggestion.

Updated, ready to squash?

/Jarkko
On Tue, Apr 14, 2020 at 09:45:28PM +0300, Jarkko Sakkinen wrote:
> On Tue, Apr 14, 2020 at 10:17:49AM +0300, Jarkko Sakkinen wrote:
> > On Mon, Apr 13, 2020 at 09:32:34PM -0700, Sean Christopherson wrote:
> > > On Mon, Apr 06, 2020 at 11:56:26PM +0300, Jarkko Sakkinen wrote:
> > > > From: Sean Christopherson <sean.j.christopherson@intel.com>
> > > >          spin_lock(&encl->mm_lock);
> > > > +
> > > >          list_add_rcu(&encl_mm->list, &encl->mm_list);
> > > > -        spin_unlock(&encl->mm_lock);
> > > >
> > > > -        synchronize_srcu(&encl->srcu);
> > > > +        /* Even if the CPU does not reorder writes, a compiler might. */
> > >
> > > The preferred (by maintainers) style of comment for smp_wmb()/smp_rmb()
> > > comments is to explicitly call out the associated reader/writer. If you
> > > want to go with a minimal comment, my vote is for something like:
> > >
> > >         /*
> > >          * Add to list before updating version. Pairs with the smp_rmb() in
> > >          * sgx_reclaimer_block().
> > >          */
> > >
> > > And if you want to go really spartan, I'd take:
> > >
> > >         /* Pairs with smp_rmb() in sgx_reclaimer_block(). */
> > >
> > > over a generic comment about the compiler reordering instructions.
> >
> > Thanks Sean, makes sense, I'll go with your "spartan" suggestion.
>
> Updated, ready to squash?

Any objection to using the spartan comment for the smp_rmb() in
sgx_reclaimer_block() as well?
On Wed, Apr 15, 2020 at 09:28:10PM -0700, Sean Christopherson wrote:
> On Tue, Apr 14, 2020 at 09:45:28PM +0300, Jarkko Sakkinen wrote:
> > On Tue, Apr 14, 2020 at 10:17:49AM +0300, Jarkko Sakkinen wrote:
> > > On Mon, Apr 13, 2020 at 09:32:34PM -0700, Sean Christopherson wrote:
> > > > On Mon, Apr 06, 2020 at 11:56:26PM +0300, Jarkko Sakkinen wrote:
> > > > > From: Sean Christopherson <sean.j.christopherson@intel.com>
> > > > >          spin_lock(&encl->mm_lock);
> > > > > +
> > > > >          list_add_rcu(&encl_mm->list, &encl->mm_list);
> > > > > -        spin_unlock(&encl->mm_lock);
> > > > >
> > > > > -        synchronize_srcu(&encl->srcu);
> > > > > +        /* Even if the CPU does not reorder writes, a compiler might. */
> > > >
> > > > The preferred (by maintainers) style of comment for smp_wmb()/smp_rmb()
> > > > comments is to explicitly call out the associated reader/writer. If you
> > > > want to go with a minimal comment, my vote is for something like:
> > > >
> > > >         /*
> > > >          * Add to list before updating version. Pairs with the smp_rmb() in
> > > >          * sgx_reclaimer_block().
> > > >          */
> > > >
> > > > And if you want to go really spartan, I'd take:
> > > >
> > > >         /* Pairs with smp_rmb() in sgx_reclaimer_block(). */
> > > >
> > > > over a generic comment about the compiler reordering instructions.
> > >
> > > Thanks Sean, makes sense, I'll go with your "spartan" suggestion.
> >
> > Updated, ready to squash?
>
> Any objection to using the spartan comment for the smp_rmb() in
> sgx_reclaimer_block() as well?

For sure. I think here the role of the comment is to help with
the navigation.

/Jarkko
On Thu, Apr 16, 2020 at 08:25:01PM +0300, Jarkko Sakkinen wrote:
> On Wed, Apr 15, 2020 at 09:28:10PM -0700, Sean Christopherson wrote:
> > On Tue, Apr 14, 2020 at 09:45:28PM +0300, Jarkko Sakkinen wrote:
> > > On Tue, Apr 14, 2020 at 10:17:49AM +0300, Jarkko Sakkinen wrote:
> > > > On Mon, Apr 13, 2020 at 09:32:34PM -0700, Sean Christopherson wrote:
> > > > > On Mon, Apr 06, 2020 at 11:56:26PM +0300, Jarkko Sakkinen wrote:
> > > > > > From: Sean Christopherson <sean.j.christopherson@intel.com>
> > > > > >          spin_lock(&encl->mm_lock);
> > > > > > +
> > > > > >          list_add_rcu(&encl_mm->list, &encl->mm_list);
> > > > > > -        spin_unlock(&encl->mm_lock);
> > > > > >
> > > > > > -        synchronize_srcu(&encl->srcu);
> > > > > > +        /* Even if the CPU does not reorder writes, a compiler might. */
> > > > >
> > > > > The preferred (by maintainers) style of comment for smp_wmb()/smp_rmb()
> > > > > comments is to explicitly call out the associated reader/writer. If you
> > > > > want to go with a minimal comment, my vote is for something like:
> > > > >
> > > > >         /*
> > > > >          * Add to list before updating version. Pairs with the smp_rmb() in
> > > > >          * sgx_reclaimer_block().
> > > > >          */
> > > > >
> > > > > And if you want to go really spartan, I'd take:
> > > > >
> > > > >         /* Pairs with smp_rmb() in sgx_reclaimer_block(). */
> > > > >
> > > > > over a generic comment about the compiler reordering instructions.
> > > >
> > > > Thanks Sean, makes sense, I'll go with your "spartan" suggestion.
> > >
> > > Updated, ready to squash?
> >
> > Any objection to using the spartan comment for the smp_rmb() in
> > sgx_reclaimer_block() as well?
>
> For sure. I think here the role of the comment is to help with
> the navigation.
>
> /Jarkko

Finally squashed.

/Jarkko
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index e0124a2f22d5..1646c3d1839c 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -196,6 +196,9 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
         struct sgx_encl_mm *encl_mm;
         int ret;
 
+        /* mm_list can be accessed only by a single thread at a time. */
+        lockdep_assert_held_write(&mm->mmap_sem);
+
         if (atomic_read(&encl->flags) & SGX_ENCL_DEAD)
                 return -EINVAL;
 
@@ -221,11 +224,21 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
                 return ret;
         }
 
+        /*
+         * The page reclaimer uses list version for synchronization instead of
+         * synchronize_srcu() because otherwise we could conflict with
+         * dup_mmap().
+         */
+
         spin_lock(&encl->mm_lock);
+
         list_add_rcu(&encl_mm->list, &encl->mm_list);
-        spin_unlock(&encl->mm_lock);
 
-        synchronize_srcu(&encl->srcu);
+        /* Even if the CPU does not reorder writes, a compiler might. */
+        smp_wmb();
+        encl->mm_list_version++;
+
+        spin_unlock(&encl->mm_lock);
 
         return 0;
 }
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index 44b353aa8866..f0f72e591244 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -74,6 +74,7 @@ struct sgx_encl {
         struct mutex lock;
         struct list_head mm_list;
         spinlock_t mm_lock;
+        unsigned long mm_list_version;
         struct file *backing;
         struct kref refcount;
         struct srcu_struct srcu;
diff --git a/arch/x86/kernel/cpu/sgx/reclaim.c b/arch/x86/kernel/cpu/sgx/reclaim.c
index 39f0ddefbb79..5e089f0db201 100644
--- a/arch/x86/kernel/cpu/sgx/reclaim.c
+++ b/arch/x86/kernel/cpu/sgx/reclaim.c
@@ -184,28 +184,39 @@ static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
         struct sgx_encl_page *page = epc_page->owner;
         unsigned long addr = SGX_ENCL_PAGE_ADDR(page);
         struct sgx_encl *encl = page->encl;
+        unsigned long mm_list_version;
         struct sgx_encl_mm *encl_mm;
         struct vm_area_struct *vma;
         int idx, ret;
 
-        idx = srcu_read_lock(&encl->srcu);
+        do {
+                mm_list_version = encl->mm_list_version;
 
-        list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
-                if (!mmget_not_zero(encl_mm->mm))
-                        continue;
+                /*
+                 * Fence the read. This guarantees that we don't mutate the old
+                 * list with a new version.
+                 */
+                smp_rmb();
 
-                down_read(&encl_mm->mm->mmap_sem);
+                idx = srcu_read_lock(&encl->srcu);
 
-                ret = sgx_encl_find(encl_mm->mm, addr, &vma);
-                if (!ret && encl == vma->vm_private_data)
-                        zap_vma_ptes(vma, addr, PAGE_SIZE);
+                list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
+                        if (!mmget_not_zero(encl_mm->mm))
+                                continue;
 
-                up_read(&encl_mm->mm->mmap_sem);
+                        down_read(&encl_mm->mm->mmap_sem);
 
-                mmput_async(encl_mm->mm);
-        }
+                        ret = sgx_encl_find(encl_mm->mm, addr, &vma);
+                        if (!ret && encl == vma->vm_private_data)
+                                zap_vma_ptes(vma, addr, PAGE_SIZE);
 
-        srcu_read_unlock(&encl->srcu, idx);
+                        up_read(&encl_mm->mm->mmap_sem);
+
+                        mmput_async(encl_mm->mm);
+                }
+
+                srcu_read_unlock(&encl->srcu, idx);
+        } while (unlikely(encl->mm_list_version != mm_list_version));
 
         mutex_lock(&encl->lock);
 
@@ -250,6 +261,11 @@ static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
         struct sgx_encl_mm *encl_mm;
         int idx;
 
+        /*
+         * Can race with sgx_encl_mm_add(), but ETRACK has already been
+         * executed, which means that the CPUs running in the new mm will enter
+         * into the enclave with a fresh epoch.
+         */
         cpumask_clear(cpumask);
 
         idx = srcu_read_lock(&encl->srcu);