
[v3] x86/altp2m: cleanup p2m_altp2m_lazy_copy

Message ID 20190528102823.14171-1-george.dunlap@citrix.com (mailing list archive)
State New, archived
Series [v3] x86/altp2m: cleanup p2m_altp2m_lazy_copy

Commit Message

George Dunlap May 28, 2019, 10:28 a.m. UTC
From: Tamas K Lengyel <tamas@tklengyel.com>

The p2m_altp2m_lazy_copy() function is responsible for lazily populating
an altp2m view when the guest traps out due to no EPT entry being present
in the active view.  Currently, in addition to taking a number of
unused arguments, the whole calling convention involves several
redundant p2m lookups: the function reads the hostp2m, even though the
caller has just read the same hostp2m entry; and then the caller
re-reads the altp2m entry that the function has just read (and possibly set).
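
To illustrate, a simplified sketch of the current flow (not a literal
excerpt; the removed lines in the diff below show the real code):

    /* Caller (hvm_hap_nested_page_fault) reads the hostp2m entry... */
    mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
                              P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
                              NULL);

    if ( ap2m_active )
    {
        /*
         * ...p2m_altp2m_lazy_copy() then reads the altp2m entry and, if
         * it is empty, re-reads the hostp2m entry before copying it into
         * the altp2m...
         */
        if ( p2m_altp2m_lazy_copy(curr, gpa, gla, npfec, &p2m) )
        {
            __put_gfn(hostp2m, gfn);
            rc = 1;
            goto out;
        }

        /* ...and the caller re-reads the altp2m entry just read/set. */
        mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
    }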

Rework this function to make it a bit more rational.  Specifically:

- Pass in the hostp2m entry values we have just read, for it to use to
  populate the altp2m entry if it finds that entry empty.

- If the altp2m entry is not empty, pass out the values we've read so
  that the caller doesn't need to re-walk the tables.

- Either way, return with the gfn 'locked', to make clean-up handling
  more consistent.

Rename the function to better reflect this functionality.

While we're here, change bool_t to bool, and return true/false rather
than 1/0.

It's a bit grating to do both the p2m_lock() and the get_gfn(),
knowing that they boil down to the same thing at the moment; but we
have to maintain the fiction until such time as we decide to get rid
of it entirely.
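
For reference, here is a sketch of the resulting caller-side flow
(simplified; the hvm.c hunk below has the actual code, and all names are
taken from the patch):

    /* Read the hostp2m entry once, retaining the page order. */
    mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
                              P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
                              &page_order);

    if ( ap2m_active )
    {
        p2m = p2m_get_altp2m(curr);

        /*
         * Either returns the existing altp2m entry in mfn/p2mt/p2ma, or
         * propagates the hostp2m values passed in.  The gfn is left
         * locked in the altp2m either way.
         */
        if ( p2m_altp2m_get_or_propagate(p2m, gfn, &mfn, &p2mt, &p2ma,
                                         page_order) )
        {
            /* Entry was propagated from the host p2m -- retry the fault. */
            rc = 1;
            goto out_put_gfn;
        }
    }
    else
        p2m = hostp2m;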

Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
---
This is identical to the patch I sent in response to Tamas' v2

Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Roger Pau Monne <roger.pau@citrix.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
---
 xen/arch/x86/hvm/hvm.c    | 19 +++++---
 xen/arch/x86/mm/p2m.c     | 95 +++++++++++++++++++++------------------
 xen/include/asm-x86/p2m.h |  5 ++-
 3 files changed, 67 insertions(+), 52 deletions(-)

Comments

Jan Beulich May 28, 2019, 11:56 a.m. UTC | #1
>>> On 28.05.19 at 12:28, <george.dunlap@citrix.com> wrote:
> From: Tamas K Lengyel <tamas@tklengyel.com>
> 
> The p2m_altp2m_lazy_copy() function is responsible for lazily populating
> an altp2m view when the guest traps out due to no EPT entry being present
> in the active view.  Currently, in addition to taking a number of
> unused arguments, the whole calling convention involves several
> redundant p2m lookups: the function reads the hostp2m, even though the
> caller has just read the same hostp2m entry; and then the caller
> re-reads the altp2m entry that the function has just read (and possibly set).
> 
> Rework this function to make it a bit more rational.  Specifically:
> 
> - Pass in the hostp2m entry values we have just read, for it to use to
>   populate the altp2m entry if it finds that entry empty.
>
> - If the altp2m entry is not empty, pass out the values we've read so
>   that the caller doesn't need to re-walk the tables.
> 
> - Either way, return with the gfn 'locked', to make clean-up handling
>   more consistent.
> 
> Rename the function to better reflect this functionality.
> 
> While we're here, change bool_t to bool, and return true/false rather
> than 1/0.
> 
> It's a bit grating to do both the p2m_lock() and the get_gfn(),
> knowing that they boil down to the same thing at the moment; but we
> have to maintain the fiction until such time as we decide to get rid
> of it entirely.
> 
> Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
> Signed-off-by: George Dunlap <george.dunlap@citrix.com>

FWIW
Acked-by: Jan Beulich <jbeulich@suse.com>

Jan

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index ed1ff9c87f..2f4e7bd30e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1691,6 +1691,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     int sharing_enomem = 0;
     vm_event_request_t *req_ptr = NULL;
     bool_t ap2m_active, sync = 0;
+    unsigned int page_order;
 
     /* On Nested Virtualization, walk the guest page table.
      * If this succeeds, all is fine.
@@ -1757,19 +1758,23 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     hostp2m = p2m_get_hostp2m(currd);
     mfn = get_gfn_type_access(hostp2m, gfn, &p2mt, &p2ma,
                               P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
-                              NULL);
+                              &page_order);
 
     if ( ap2m_active )
     {
-        if ( p2m_altp2m_lazy_copy(curr, gpa, gla, npfec, &p2m) )
+        p2m = p2m_get_altp2m(curr);
+
+        /*
+         * Get the altp2m entry if present; or if not, propagate from
+         * the host p2m.  NB that this returns with gfn locked in the
+         * altp2m.
+         */
+        if ( p2m_altp2m_get_or_propagate(p2m, gfn, &mfn, &p2mt, &p2ma, page_order) )
         {
-            /* entry was lazily copied from host -- retry */
-            __put_gfn(hostp2m, gfn);
+            /* Entry was copied from host -- retry fault */
             rc = 1;
-            goto out;
+            goto out_put_gfn;
         }
-
-        mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
     }
     else
         p2m = hostp2m;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 278e1c114e..385146ca63 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2397,65 +2397,74 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
 }
 
 /*
- * If the fault is for a not present entry:
- *     if the entry in the host p2m has a valid mfn, copy it and retry
- *     else indicate that outer handler should handle fault
+ * Read info about the gfn in an altp2m, locking the gfn.
  *
- * If the fault is for a present entry:
- *     indicate that outer handler should handle fault
+ * If the entry is valid, pass the results back to the caller.
+ *
+ * If the entry is invalid, and the host's entry is also invalid,
+ * return to the caller without any changes.
+ *
+ * If the entry is invalid, and the host entry is valid, propagate
+ * the host's entry to the altp2m (retaining page order), and indicate
+ * that the caller should re-try the faulting instruction.
  */
-
-bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
-                            unsigned long gla, struct npfec npfec,
-                            struct p2m_domain **ap2m)
+bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
+                                 mfn_t *mfn, p2m_type_t *p2mt,
+                                 p2m_access_t *p2ma, unsigned int page_order)
 {
-    struct p2m_domain *hp2m = p2m_get_hostp2m(v->domain);
-    p2m_type_t p2mt;
-    p2m_access_t p2ma;
-    unsigned int page_order;
-    gfn_t gfn = _gfn(paddr_to_pfn(gpa));
+    p2m_type_t ap2mt;
+    p2m_access_t ap2ma;
     unsigned long mask;
-    mfn_t mfn;
-    int rv;
-
-    *ap2m = p2m_get_altp2m(v);
-
-    mfn = get_gfn_type_access(*ap2m, gfn_x(gfn), &p2mt, &p2ma,
-                              0, &page_order);
-    __put_gfn(*ap2m, gfn_x(gfn));
-
-    if ( !mfn_eq(mfn, INVALID_MFN) )
-        return 0;
+    gfn_t gfn;
+    mfn_t amfn;
+    int rc;
 
-    mfn = get_gfn_type_access(hp2m, gfn_x(gfn), &p2mt, &p2ma,
-                              P2M_ALLOC, &page_order);
-    __put_gfn(hp2m, gfn_x(gfn));
+    /*
+     * NB we must get the full lock on the altp2m here, in addition to
+     * the lock on the individual gfn, since we may change a range of
+     * gfns below.
+     */
+    p2m_lock(ap2m);
+
+    amfn = get_gfn_type_access(ap2m, gfn_l, &ap2mt, &ap2ma, 0, NULL);
 
-    if ( mfn_eq(mfn, INVALID_MFN) )
-        return 0;
+    if ( !mfn_eq(amfn, INVALID_MFN) )
+    {
+        p2m_unlock(ap2m);
+        *mfn  = amfn;
+        *p2mt = ap2mt;
+        *p2ma = ap2ma;
+        return false;
+    }
 
-    p2m_lock(*ap2m);
+    /* Host entry is also invalid; don't bother setting the altp2m entry. */
+    if ( mfn_eq(*mfn, INVALID_MFN) )
+    {
+        p2m_unlock(ap2m);
+        return false;
+    }
 
     /*
      * If this is a superpage mapping, round down both frame numbers
-     * to the start of the superpage.
+     * to the start of the superpage.  NB that we repurpose `amfn`
+     * here.
      */
     mask = ~((1UL << page_order) - 1);
-    mfn = _mfn(mfn_x(mfn) & mask);
-    gfn = _gfn(gfn_x(gfn) & mask);
+    amfn = _mfn(mfn_x(*mfn) & mask);
+    gfn = _gfn(gfn_l & mask);
 
-    rv = p2m_set_entry(*ap2m, gfn, mfn, page_order, p2mt, p2ma);
-    p2m_unlock(*ap2m);
+    rc = p2m_set_entry(ap2m, gfn, amfn, page_order, *p2mt, *p2ma);
+    p2m_unlock(ap2m);
 
-    if ( rv )
+    if ( rc )
     {
-        gdprintk(XENLOG_ERR,
-	    "failed to set entry for %#"PRIx64" -> %#"PRIx64" p2m %#"PRIx64"\n",
-	    gfn_x(gfn), mfn_x(mfn), (unsigned long)*ap2m);
-        domain_crash(hp2m->domain);
+        gprintk(XENLOG_ERR,
+        "failed to set entry for %#"PRIx64" -> %#"PRIx64" altp2m %#"PRIx16". rc: %"PRIi32"\n",
+        gfn_l, mfn_x(amfn), vcpu_altp2m(current).p2midx, rc);
+        domain_crash(ap2m->domain);
     }
-
-    return 1;
+
+    return true;
 }
 
 enum altp2m_reset_type {
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 719513f4ba..905fad7c8d 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -879,8 +879,9 @@ void p2m_altp2m_check(struct vcpu *v, uint16_t idx);
 void p2m_flush_altp2m(struct domain *d);
 
 /* Alternate p2m paging */
-bool_t p2m_altp2m_lazy_copy(struct vcpu *v, paddr_t gpa,
-    unsigned long gla, struct npfec npfec, struct p2m_domain **ap2m);
+bool p2m_altp2m_get_or_propagate(struct p2m_domain *ap2m, unsigned long gfn_l,
+                                 mfn_t *mfn, p2m_type_t *p2mt,
+                                 p2m_access_t *p2ma, unsigned int page_order);
 
 /* Make a specific alternate p2m valid */
 int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx);