[RFC,10/10,HACK] alloc pages: enable preemption early

Message ID 20210223023428.757694-11-volodymyr_babchuk@epam.com (mailing list archive)
State New, archived
Series Preemption in hypervisor (ARM only)

Commit Message

Volodymyr Babchuk Feb. 23, 2021, 2:34 a.m. UTC
This patch moves spin_unlock() and rcu_unlock_domain() earlier in the
code just to decrease the time spent with preemption disabled. The
proper fix is to replace the spinlocks with mutexes, but mutexes are
not implemented yet.
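
As a rough, out-of-tree illustration of the pattern applied here (not
part of the patch): only the free-list bookkeeping stays under the
lock, while the long per-page loop runs after the lock is dropped,
which is what the alloc_heap_pages() hunk below does with heap_lock.
The sketch uses POSIX threads instead of Xen's primitives, and all
names in it (fake_heap_lock, fake_free_list, init_page(), ...) are
made up for the example; they are not Xen APIs.

#include <pthread.h>
#include <stdio.h>

#define NR_PAGES 8

static pthread_mutex_t fake_heap_lock = PTHREAD_MUTEX_INITIALIZER;
static int fake_free_list[NR_PAGES];    /* stand-in for the heap free lists */

/* Per-page setup that does not need the heap lock (analogous to the
 * count_info/owner initialisation loop in alloc_heap_pages()). */
static void init_page(int page)
{
    printf("initialising page %d outside the lock\n", page);
}

static int alloc_pages(unsigned int order)
{
    int first, i;

    pthread_mutex_lock(&fake_heap_lock);
    /* Only the free-list bookkeeping is serialised... */
    first = fake_free_list[0];
    for ( i = 0; i < (1 << order); i++ )
        fake_free_list[i] = -1;          /* mark pages as taken */
    pthread_mutex_unlock(&fake_heap_lock);

    /* ...the long per-page loop runs with the lock already dropped, so
     * other CPUs (or, in Xen, preemption) are not blocked by it. */
    for ( i = 0; i < (1 << order); i++ )
        init_page(first + i);

    return first;
}

int main(void)
{
    int i;

    for ( i = 0; i < NR_PAGES; i++ )
        fake_free_list[i] = i;
    alloc_pages(2);                      /* allocate 2^2 = 4 pages */
    return 0;
}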

With this patch applied, allocating a huge number of pages (e.g. 1GB
of RAM) does not lead to latency problems in time-critical domains.
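
For scale (assuming the usual 4 KiB page granularity): 1 GiB / 4 KiB =
262,144 pages, each of which passes through the per-page initialisation
loop in alloc_heap_pages(), so keeping that loop inside the
preemption-disabled critical section adds up quickly.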

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
---
 xen/common/memory.c     |  4 ++--
 xen/common/page_alloc.c | 21 ++-------------------
 2 files changed, 4 insertions(+), 21 deletions(-)

Patch

diff --git a/xen/common/memory.c b/xen/common/memory.c
index 76b9f58478..73c175f64e 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1390,6 +1390,8 @@  long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             pv_shim_online_memory(args.nr_extents, args.extent_order);
 #endif
 
+        rcu_unlock_domain(d);
+
         switch ( op )
         {
         case XENMEM_increase_reservation:
@@ -1403,8 +1405,6 @@  long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             break;
         }
 
-        rcu_unlock_domain(d);
-
         rc = args.nr_done;
 
         if ( args.preempted )
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 1744e6faa5..43c2f5d6e0 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -996,6 +996,8 @@  static struct page_info *alloc_heap_pages(
     if ( d != NULL )
         d->last_alloc_node = node;
 
+    spin_unlock(&heap_lock);
+
     for ( i = 0; i < (1 << order); i++ )
     {
         /* Reference count must continuously be zero for free pages. */
@@ -1025,8 +1027,6 @@  static struct page_info *alloc_heap_pages(
 
     }
 
-    spin_unlock(&heap_lock);
-
     if ( first_dirty != INVALID_DIRTY_IDX ||
          (scrub_debug && !(memflags & MEMF_no_scrub)) )
     {
@@ -2274,23 +2274,6 @@  int assign_pages(
         goto out;
     }
 
-#ifndef NDEBUG
-    {
-        unsigned int extra_pages = 0;
-
-        for ( i = 0; i < (1ul << order); i++ )
-        {
-            ASSERT(!(pg[i].count_info & ~PGC_extra));
-            if ( pg[i].count_info & PGC_extra )
-                extra_pages++;
-        }
-
-        ASSERT(!extra_pages ||
-               ((memflags & MEMF_no_refcount) &&
-                extra_pages == 1u << order));
-    }
-#endif
-
     if ( pg[0].count_info & PGC_extra )
     {
         d->extra_pages += 1u << order;