[v5,06/18] x86/mem_sharing: don't try to unshare twice during page fault

Message ID 0cb067e1d0ab8bbba63a461d61c4f07cd15ab0d6.1579628566.git.tamas.lengyel@intel.com
State Superseded
Series VM forking

Commit Message

Tamas K Lengyel Jan. 21, 2020, 5:49 p.m. UTC
An unshare of the page was already attempted in get_gfn_type_access. If that
didn't work, trying again here is pointless. Don't try to send a vm_event
again either; simply check whether a sharing ring is present.
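
For context, a minimal sketch of the resulting fault-path logic (simplified
signature and a hypothetical helper name, not the literal Xen code; only
p2m_is_shared(), vm_event_check_ring() and the vm_event_share ring come from
the patch itself):

    /* Minimal sketch; hypothetical helper, not the actual fault handler. */
    static bool shared_fault_sketch(struct domain *d, unsigned long gfn,
                                    p2m_type_t p2mt, bool write_access)
    {
        /*
         * get_gfn_type_access() has already tried to unshare the page
         * (P2M_UNSHARE) before the fault handler runs.  If the type is
         * still shared on a write access, that attempt failed with
         * ENOMEM, so retrying mem_sharing_unshare_page() is pointless.
         */
        if ( !write_access || !p2m_is_shared(p2mt) )
            return true;                 /* not a sharing problem */

        /* No helper on the sharing ring: nobody can free up memory. */
        if ( !vm_event_check_ring(d->vm_event_share) )
            return false;                /* caller crashes the domain */

        /*
         * A helper is listening; the earlier unshare attempt already
         * notified it, so just let the vCPU retry the access later.
         */
        return true;
    }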

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/hvm/hvm.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)
Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 55bf7353c9..e60b4931bf 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -38,6 +38,7 @@ 
 #include <xen/warning.h>
 #include <xen/vpci.h>
 #include <xen/nospec.h>
+#include <xen/vm_event.h>
 #include <asm/shadow.h>
 #include <asm/hap.h>
 #include <asm/current.h>
@@ -1702,7 +1703,7 @@  int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     struct domain *currd = curr->domain;
     struct p2m_domain *p2m, *hostp2m;
     int rc, fall_through = 0, paged = 0;
-    int sharing_enomem = 0;
+    bool sharing_enomem = false;
     vm_event_request_t *req_ptr = NULL;
     bool sync = false;
     unsigned int page_order;
@@ -1894,14 +1895,16 @@  int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
         paged = 1;
 
-    /* Mem sharing: unshare the page and try again */
-    if ( npfec.write_access && (p2mt == p2m_ram_shared) )
+#ifdef CONFIG_MEM_SHARING
+    /* Mem sharing: if still shared on write access then it's ENOMEM */
+    if ( npfec.write_access && p2m_is_shared(p2mt) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
-        sharing_enomem = mem_sharing_unshare_page(currd, gfn);
+        sharing_enomem = true;
         rc = 1;
         goto out_put_gfn;
     }
+#endif
 
     /* Spurious fault? PoD and log-dirty also take this path. */
     if ( p2m_is_ram(p2mt) )
@@ -1955,19 +1958,21 @@  int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      */
     if ( paged )
         p2m_mem_paging_populate(currd, gfn);
+
     if ( sharing_enomem )
     {
-        int rv;
-
-        if ( (rv = mem_sharing_notify_enomem(currd, gfn, true)) < 0 )
+#ifdef CONFIG_MEM_SHARING
+        if ( !vm_event_check_ring(currd->vm_event_share) )
         {
-            gdprintk(XENLOG_ERR, "Domain %hu attempt to unshare "
-                     "gfn %lx, ENOMEM and no helper (rc %d)\n",
-                     currd->domain_id, gfn, rv);
+            gprintk(XENLOG_ERR, "Domain %pd attempt to unshare "
+                    "gfn %lx, ENOMEM and no helper\n",
+                    currd, gfn);
             /* Crash the domain */
             rc = 0;
         }
+#endif
     }
+
     if ( req_ptr )
     {
         if ( monitor_traps(curr, sync, req_ptr) < 0 )