[PATCHv7] x86/ept: defer the invalidation until the p2m lock is released

Message ID 1454343998-26294-2-git-send-email-david.vrabel@citrix.com (mailing list archive)
State New, archived

Commit Message

David Vrabel Feb. 1, 2016, 4:26 p.m. UTC
Holding the p2m lock while calling ept_sync_domain() is very expensive
since it does an on_selected_cpus() call.  IPIs on many-socket machines
can be very slow and on_selected_cpus() is serialized.

It is safe to defer the invalidate until the p2m lock is released
except for two cases:

1. When freeing a page table page (since partial translations may be
   cached).
2. When reclaiming a zero page as part of PoD.

For these cases, add p2m_tlb_flush_sync() calls which will immediately
perform the invalidate before the page is freed or reclaimed.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
---
v7:
- Add some more p2m_tlb_flush_sync() calls to PoD.
- More comments.

v6:
- Move p2m_tlb_flush_sync() to immediately before p2m_free_ptp().  It was
  called all the time otherwise.

v5:
- add p2m_tlb_flush_sync() and call it before freeing page table pages
  and reclaiming zeroed PoD pages.

v2:
- use per-p2m list for deferred pages.
- update synced_mask while holding write lock.
---
 xen/arch/x86/mm/mm-locks.h | 23 +++++++++++++++--------
 xen/arch/x86/mm/p2m-ept.c  | 42 ++++++++++++++++++++++++++++++++++--------
 xen/arch/x86/mm/p2m-pod.c  |  4 ++++
 xen/arch/x86/mm/p2m.c      | 19 +++++++++++++++++++
 xen/include/asm-x86/p2m.h  | 24 ++++++++++++++++++++++++
 5 files changed, 96 insertions(+), 16 deletions(-)
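
For orientation, the sketch below illustrates the flow the patch introduces.
The function names follow the patch, but the two callers and their bodies
are simplified, hypothetical stand-ins rather than real Xen code:

/* Hypothetical callers -- illustrative sketch only, not part of the patch. */
static void example_p2m_update(struct p2m_domain *p2m)
{
    p2m_lock(p2m);               /* also increments p2m->defer_flush */

    /*
     * ... modify p2m entries ...
     * ept_sync_domain() sees defer_flush != 0, so it only sets
     * p2m->need_flush instead of issuing IPIs under the lock.
     */

    p2m_unlock(p2m);             /* if need_flush is set, releases the
                                  * lock and then performs the flush */
}

static void example_free_table_page(struct p2m_domain *p2m,
                                    struct page_info *pg)
{
    /*
     * Case 1 above: a page table page is about to be freed, so any
     * cached (partial) translations must be invalidated first.
     */
    p2m_tlb_flush_sync(p2m);     /* flush now, with the lock still held */
    p2m_free_ptp(p2m, pg);
}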

Comments

Tian, Kevin Feb. 3, 2016, 3:44 a.m. UTC | #1
> From: David Vrabel [mailto:david.vrabel@citrix.com]
> Sent: Tuesday, February 02, 2016 12:27 AM
> 
> Holding the p2m lock while calling ept_sync_domain() is very expensive
> since it does a on_selected_cpus() call.  IPIs on many socket machines
> can be very slows and on_selected_cpus() is serialized.

slows -> slow

> 
> It is safe to defer the invalidate until the p2m lock is released
> except for two cases:
> 
> 1. When freeing a page table page (since partial translations may be
>    cached).
> 2. When reclaiming a zero page as part of PoD.
> 
> For these cases, add p2m_tlb_flush_sync() calls which will immediately
> perform the invalidate before the page is freed or reclaimed.
> 
> Signed-off-by: David Vrabel <david.vrabel@citrix.com>
[...]
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index c094320..43c7f1b 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -263,6 +263,7 @@ static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t
> *ept_entry, int l
>          unmap_domain_page(epte);
>      }
> 
> +    p2m_tlb_flush_sync(p2m);
>      p2m_free_ptp(p2m, mfn_to_page(ept_entry->mfn));
>  }
> 
> @@ -1095,15 +1096,10 @@ static void __ept_sync_domain(void *info)
>       */
>  }
> 
> -void ept_sync_domain(struct p2m_domain *p2m)
> +static void ept_sync_domain_prepare(struct p2m_domain *p2m)
>  {
>      struct domain *d = p2m->domain;
>      struct ept_data *ept = &p2m->ept;
> -    /* Only if using EPT and this domain has some VCPUs to dirty. */
> -    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
> -        return;
> -
> -    ASSERT(local_irq_is_enabled());
> 
>      if ( nestedhvm_enabled(d) && !p2m_is_nestedp2m(p2m) )
>          p2m_flush_nestedp2m(d);

should we postpone the nestedp2m flush similarly, since it also incurs an
on_selected_cpus() call while holding the p2m lock?

> @@ -1116,9 +1112,38 @@ void ept_sync_domain(struct p2m_domain *p2m)
>       *    of an EP4TA reuse is still needed.
>       */
>      cpumask_setall(ept->invalidate);
> +}
> +
> +static void ept_sync_domain_mask(struct p2m_domain *p2m, const cpumask_t *mask)
> +{
> +    on_selected_cpus(mask, __ept_sync_domain, p2m, 1);
> +}
> +
> +void ept_sync_domain(struct p2m_domain *p2m)
> +{
> +    struct domain *d = p2m->domain;
> 
> -    on_selected_cpus(d->domain_dirty_cpumask,
> -                     __ept_sync_domain, p2m, 1);
> +    /* Only if using EPT and this domain has some VCPUs to dirty. */
> +    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
> +        return;
> +
> +    ept_sync_domain_prepare(p2m);
> +
> +    if ( p2m->defer_flush )
> +    {
> +        p2m->need_flush = 1;
> +        return;
> +    }
> +
> +    ept_sync_domain_mask(p2m, d->domain_dirty_cpumask);
> +}
> +
> +static void ept_flush_and_unlock(struct p2m_domain *p2m, bool_t unlock)
> +{
> +    p2m->need_flush = 0;
> +    if ( unlock )
> +        mm_write_unlock(&p2m->lock);
> +    ept_sync_domain_mask(p2m, p2m->domain->domain_dirty_cpumask);
>  }
> 
>  static void ept_enable_pml(struct p2m_domain *p2m)
> @@ -1169,6 +1194,7 @@ int ept_p2m_init(struct p2m_domain *p2m)
>      p2m->change_entry_type_range = ept_change_entry_type_range;
>      p2m->memory_type_changed = ept_memory_type_changed;
>      p2m->audit_p2m = NULL;
> +    p2m->flush_and_unlock = ept_flush_and_unlock;
> 
>      /* Set the memory type used when accessing EPT paging structures. */
>      ept->ept_mt = EPT_DEFAULT_MT;
> diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
> index ea16d3e..35835d1 100644
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -626,6 +626,7 @@ p2m_pod_decrease_reservation(struct domain *d,
> 
>              p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
>                            p2m_invalid, p2m->default_access);
> +            p2m_tlb_flush_sync(p2m);
>              for ( j = 0; j < n; ++j )
>                  set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
>              p2m_pod_cache_add(p2m, page, cur_order);
> @@ -755,6 +756,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m,
> unsigned long gfn)
>      /* Try to remove the page, restoring old mapping if it fails. */
>      p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
>                    p2m_populate_on_demand, p2m->default_access);
> +    p2m_tlb_flush_sync(p2m);
> 
>      /* Make none of the MFNs are used elsewhere... for example, mapped
>       * via the grant table interface, or by qemu.  Allow one refcount for
> @@ -886,6 +888,8 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long
> *gfns, int count)
>          }
>      }
> 
> +    p2m_tlb_flush_sync(p2m);
> +
>      /* Now check each page for real */
>      for ( i=0; i < count; i++ )
>      {
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index a45ee35..36a8fb7 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -325,6 +325,25 @@ void p2m_flush_hardware_cached_dirty(struct domain *d)
>      }
>  }
> 
> +/*
> + * Force a synchronous P2M TLB flush if a deferred flush is pending.
> + *
> + * Must be called with the p2m lock held.
> + */
> +void p2m_tlb_flush_sync(struct p2m_domain *p2m)
> +{
> +    if ( p2m->need_flush )
> +        p2m->flush_and_unlock(p2m, 0);
> +}
> +
> +void p2m_tlb_flush_and_unlock(struct p2m_domain *p2m)
> +{
> +    if ( p2m->need_flush )
> +        p2m->flush_and_unlock(p2m, 1);
> +    else
> +        mm_write_unlock(&p2m->lock);
> +}

I'd prefer to move the general stuff into this function; then you could
just keep a flush() callback, e.g.:

void p2m_tlb_flush_and_unlock(struct p2m_domain *p2m)
{
    if ( p2m->need_flush )
    {
        p2m->need_flush = 0;
        mm_write_unlock(&p2m->lock);
        p2m->flush(p2m);
    }
    else
        mm_write_unlock(&p2m->lock);
}

Same for p2m_tlb_flush_sync.

Thanks
Kevin
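
For concreteness, the sync-side counterpart of the suggestion above might
look like the sketch below; the flush() callback follows Kevin's example
and is not part of the posted patch:

void p2m_tlb_flush_sync(struct p2m_domain *p2m)
{
    if ( p2m->need_flush )
    {
        p2m->need_flush = 0;
        /*
         * Unlike the unlock path, the flush must happen with the p2m
         * lock still held, so that the caller can safely free or
         * reclaim the page afterwards.
         */
        p2m->flush(p2m);
    }
}
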
David Vrabel April 12, 2016, 1:08 p.m. UTC | #2
On 03/02/16 03:44, Tian, Kevin wrote:
>> From: David Vrabel [mailto:david.vrabel@citrix.com]
>> Sent: Tuesday, February 02, 2016 12:27 AM

Looks like I forgot about this patch.

>> It is safe to defer the invalidate until the p2m lock is released
>> except for two cases:
>>
>> 1. When freeing a page table page (since partial translations may be
>>    cached).
>> 2. When reclaiming a zero page as part of PoD.
>>
>> For these cases, add p2m_tlb_flush_sync() calls which will immediately
>> perform the invalidate before the page is freed or reclaimed.
>>
>> Signed-off-by: David Vrabel <david.vrabel@citrix.com>
> [...]
>> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
>> index c094320..43c7f1b 100644
>> --- a/xen/arch/x86/mm/p2m-ept.c
>> +++ b/xen/arch/x86/mm/p2m-ept.c
>> @@ -263,6 +263,7 @@ static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t
>> *ept_entry, int l
>>          unmap_domain_page(epte);
>>      }
>>
>> +    p2m_tlb_flush_sync(p2m);
>>      p2m_free_ptp(p2m, mfn_to_page(ept_entry->mfn));
>>  }
>>
>> @@ -1095,15 +1096,10 @@ static void __ept_sync_domain(void *info)
>>       */
>>  }
>>
>> -void ept_sync_domain(struct p2m_domain *p2m)
>> +static void ept_sync_domain_prepare(struct p2m_domain *p2m)
>>  {
>>      struct domain *d = p2m->domain;
>>      struct ept_data *ept = &p2m->ept;
>> -    /* Only if using EPT and this domain has some VCPUs to dirty. */
>> -    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
>> -        return;
>> -
>> -    ASSERT(local_irq_is_enabled());
>>
>>      if ( nestedhvm_enabled(d) && !p2m_is_nestedp2m(p2m) )
>>          p2m_flush_nestedp2m(d);
> 
> should we postpone the nestedp2m flush similarly, since it also incurs an
> on_selected_cpus() call while holding the p2m lock?

Possibly.  I have not looked at the nestedp2m stuff as it wasn't a use
case I cared about.

I think any changes in this area could be done separately.

>> --- a/xen/arch/x86/mm/p2m.c
>> +++ b/xen/arch/x86/mm/p2m.c
>> @@ -325,6 +325,25 @@ void p2m_flush_hardware_cached_dirty(struct domain *d)
>>      }
>>  }
>>
>> +/*
>> + * Force a synchronous P2M TLB flush if a deferred flush is pending.
>> + *
>> + * Must be called with the p2m lock held.
>> + */
>> +void p2m_tlb_flush_sync(struct p2m_domain *p2m)
>> +{
>> +    if ( p2m->need_flush )
>> +        p2m->flush_and_unlock(p2m, 0);
>> +}
>> +
>> +void p2m_tlb_flush_and_unlock(struct p2m_domain *p2m)
>> +{
>> +    if ( p2m->need_flush )
>> +        p2m->flush_and_unlock(p2m, 1);
>> +    else
>> +        mm_write_unlock(&p2m->lock);
>> +}
> 
> I'd prefer to move the general stuff into this function; then you could
> just keep a flush() callback, e.g.:
> 
> void p2m_tlb_flush_and_unlock(struct p2m_domain *p2m)
> {
>     if ( p2m->need_flush )
>     {
>         p2m->need_flush = 0;
>         mm_write_unlock(&p2m->lock);
>         p2m->flush(p2m);
>     }
>     else
>         mm_write_unlock(&p2m->lock);
> }
> 
> Same for p2m_tlb_flush_sync.

I'm sure there was a reason why I did it like this but I can't remember.
Let me try your suggestion.

David

Patch

diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 8a40986..2e8747e 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -265,14 +265,21 @@  declare_mm_lock(altp2mlist)
  */
 
 declare_mm_rwlock(altp2m);
-#define p2m_lock(p)                         \
-{                                           \
-    if ( p2m_is_altp2m(p) )                 \
-        mm_write_lock(altp2m, &(p)->lock);  \
-    else                                    \
-        mm_write_lock(p2m, &(p)->lock);     \
-}
-#define p2m_unlock(p)         mm_write_unlock(&(p)->lock);
+#define p2m_lock(p)                             \
+    do {                                        \
+        if ( p2m_is_altp2m(p) )                 \
+            mm_write_lock(altp2m, &(p)->lock);  \
+        else                                    \
+            mm_write_lock(p2m, &(p)->lock);     \
+        (p)->defer_flush++;                     \
+    } while (0)
+#define p2m_unlock(p)                           \
+    do {                                        \
+        if ( --(p)->defer_flush == 0 )          \
+            p2m_tlb_flush_and_unlock(p);        \
+        else                                    \
+            mm_write_unlock(&(p)->lock);        \
+    } while (0)
 #define gfn_lock(p,g,o)       p2m_lock(p)
 #define gfn_unlock(p,g,o)     p2m_unlock(p)
 #define p2m_read_lock(p)      mm_read_lock(p2m, &(p)->lock)
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index c094320..43c7f1b 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -263,6 +263,7 @@  static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int l
         unmap_domain_page(epte);
     }
     
+    p2m_tlb_flush_sync(p2m);
     p2m_free_ptp(p2m, mfn_to_page(ept_entry->mfn));
 }
 
@@ -1095,15 +1096,10 @@  static void __ept_sync_domain(void *info)
      */
 }
 
-void ept_sync_domain(struct p2m_domain *p2m)
+static void ept_sync_domain_prepare(struct p2m_domain *p2m)
 {
     struct domain *d = p2m->domain;
     struct ept_data *ept = &p2m->ept;
-    /* Only if using EPT and this domain has some VCPUs to dirty. */
-    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
-        return;
-
-    ASSERT(local_irq_is_enabled());
 
     if ( nestedhvm_enabled(d) && !p2m_is_nestedp2m(p2m) )
         p2m_flush_nestedp2m(d);
@@ -1116,9 +1112,38 @@  void ept_sync_domain(struct p2m_domain *p2m)
      *    of an EP4TA reuse is still needed.
      */
     cpumask_setall(ept->invalidate);
+}
+
+static void ept_sync_domain_mask(struct p2m_domain *p2m, const cpumask_t *mask)
+{
+    on_selected_cpus(mask, __ept_sync_domain, p2m, 1);
+}
+
+void ept_sync_domain(struct p2m_domain *p2m)
+{
+    struct domain *d = p2m->domain;
 
-    on_selected_cpus(d->domain_dirty_cpumask,
-                     __ept_sync_domain, p2m, 1);
+    /* Only if using EPT and this domain has some VCPUs to dirty. */
+    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
+        return;
+
+    ept_sync_domain_prepare(p2m);
+
+    if ( p2m->defer_flush )
+    {
+        p2m->need_flush = 1;
+        return;
+    }
+
+    ept_sync_domain_mask(p2m, d->domain_dirty_cpumask);
+}
+
+static void ept_flush_and_unlock(struct p2m_domain *p2m, bool_t unlock)
+{
+    p2m->need_flush = 0;
+    if ( unlock )
+        mm_write_unlock(&p2m->lock);
+    ept_sync_domain_mask(p2m, p2m->domain->domain_dirty_cpumask);
 }
 
 static void ept_enable_pml(struct p2m_domain *p2m)
@@ -1169,6 +1194,7 @@  int ept_p2m_init(struct p2m_domain *p2m)
     p2m->change_entry_type_range = ept_change_entry_type_range;
     p2m->memory_type_changed = ept_memory_type_changed;
     p2m->audit_p2m = NULL;
+    p2m->flush_and_unlock = ept_flush_and_unlock;
 
     /* Set the memory type used when accessing EPT paging structures. */
     ept->ept_mt = EPT_DEFAULT_MT;
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index ea16d3e..35835d1 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -626,6 +626,7 @@  p2m_pod_decrease_reservation(struct domain *d,
 
             p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
                           p2m_invalid, p2m->default_access);
+            p2m_tlb_flush_sync(p2m);
             for ( j = 0; j < n; ++j )
                 set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
             p2m_pod_cache_add(p2m, page, cur_order);
@@ -755,6 +756,7 @@  p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     /* Try to remove the page, restoring old mapping if it fails. */
     p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
                   p2m_populate_on_demand, p2m->default_access);
+    p2m_tlb_flush_sync(p2m);
 
     /* Make none of the MFNs are used elsewhere... for example, mapped
      * via the grant table interface, or by qemu.  Allow one refcount for
@@ -886,6 +888,8 @@  p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
         }
     }
 
+    p2m_tlb_flush_sync(p2m);
+
     /* Now check each page for real */
     for ( i=0; i < count; i++ )
     {
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index a45ee35..36a8fb7 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -325,6 +325,25 @@  void p2m_flush_hardware_cached_dirty(struct domain *d)
     }
 }
 
+/*
+ * Force a synchronous P2M TLB flush if a deferred flush is pending.
+ *
+ * Must be called with the p2m lock held.
+ */
+void p2m_tlb_flush_sync(struct p2m_domain *p2m)
+{
+    if ( p2m->need_flush )
+        p2m->flush_and_unlock(p2m, 0);
+}
+
+void p2m_tlb_flush_and_unlock(struct p2m_domain *p2m)
+{
+    if ( p2m->need_flush )
+        p2m->flush_and_unlock(p2m, 1);
+    else
+        mm_write_unlock(&p2m->lock);
+}
+
 mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
                     p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                     unsigned int *page_order, bool_t locked)
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index fa46dd9..d1c4a41 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -262,6 +262,24 @@  struct p2m_domain {
                                           l1_pgentry_t new, unsigned int level);
     long               (*audit_p2m)(struct p2m_domain *p2m);
 
+    /*
+     * P2M updates may require TLBs to be flushed (invalidated).
+     *
+     * If 'defer_flush' is set, flushes may be deferred by setting
+     * 'need_flush' and then flushing in 'flush_and_unlock()'.
+     *
+     * 'flush_and_unlock()' is only called if 'need_flush' is set.  It
+     * must clear 'need_flush', call 'mm_write_unlock(&p2m->lock)' if
+     * 'unlock' is true, and perform the flush.
+     *
+     * If a flush may be being deferred but an immediate flush is
+     * required (e.g., if a page is being freed to a pool other than the
+     * domheap), call p2m_tlb_flush_sync().
+     */
+    void (*flush_and_unlock)(struct p2m_domain *p2m, bool_t unlock);
+    unsigned int defer_flush;
+    bool_t need_flush;
+
     /* Default P2M access type for each page in the the domain: new pages,
      * swapped in pages, cleared pages, and pages that are ambiguously
      * retyped get this access type.  See definition of p2m_access_t. */
@@ -353,6 +371,12 @@  static inline bool_t p2m_is_altp2m(const struct p2m_domain *p2m)
 
 #define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
 
+/*
+ * Ensure any deferred p2m TLB flush has been completed on all VCPUs.
+ */
+void p2m_tlb_flush_sync(struct p2m_domain *p2m);
+void p2m_tlb_flush_and_unlock(struct p2m_domain *p2m);
+
 /**** p2m query accessors. They lock p2m_lock, and thus serialize
  * lookups wrt modifications. They _do not_ release the lock on exit.
  * After calling any of the variants below, caller needs to use