@@ -1390,6 +1390,8 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
pv_shim_online_memory(args.nr_extents, args.extent_order);
#endif
+ rcu_unlock_domain(d);
+
switch ( op )
{
case XENMEM_increase_reservation:
@@ -1403,8 +1405,6 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
- rcu_unlock_domain(d);
-
rc = args.nr_done;
if ( args.preempted )
@@ -996,6 +996,8 @@ static struct page_info *alloc_heap_pages(
if ( d != NULL )
d->last_alloc_node = node;
+ spin_unlock(&heap_lock);
+
for ( i = 0; i < (1 << order); i++ )
{
/* Reference count must continuously be zero for free pages. */
@@ -1025,8 +1027,6 @@ static struct page_info *alloc_heap_pages(
}
- spin_unlock(&heap_lock);
-
if ( first_dirty != INVALID_DIRTY_IDX ||
(scrub_debug && !(memflags & MEMF_no_scrub)) )
{
@@ -2274,23 +2274,6 @@ int assign_pages(
goto out;
}
-#ifndef NDEBUG
- {
- unsigned int extra_pages = 0;
-
- for ( i = 0; i < (1ul << order); i++ )
- {
- ASSERT(!(pg[i].count_info & ~PGC_extra));
- if ( pg[i].count_info & PGC_extra )
- extra_pages++;
- }
-
- ASSERT(!extra_pages ||
- ((memflags & MEMF_no_refcount) &&
- extra_pages == 1u << order));
- }
-#endif
-
if ( pg[0].count_info & PGC_extra )
{
d->extra_pages += 1u << order;
This patch moves spin_unlock() and rcu_unlock_domain() earlier in the
code, purely to decrease the time spent with preemption disabled. The
proper fix is to replace the spinlocks with mutexes, but mutexes are
not implemented yet. With this patch applied, allocating a huge number
of pages (e.g. 1GB of RAM) no longer leads to latency problems in
time-critical domains.

Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
---
 xen/common/memory.c     |  4 ++--
 xen/common/page_alloc.c | 21 ++-------------------
 2 files changed, 4 insertions(+), 21 deletions(-)
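For context, the locking pattern applied to alloc_heap_pages() can be
summarised with a minimal standalone sketch (this is not Xen code:
heap_lock, page_info, frame_table and the pthread spinlock are
stand-ins for illustration only). The shared allocator bookkeeping
still happens under the lock; only the O(2^order) per-page
initialisation loop runs after the unlock:

    #include <pthread.h>
    #include <stdio.h>

    #define SKETCH_ORDER 18                 /* 2^18 pages of 4 KiB ~= 1 GiB */

    struct page_info {
        unsigned long count_info;
    };

    static pthread_spinlock_t heap_lock;
    static struct page_info frame_table[1UL << SKETCH_ORDER];
    static unsigned long first_free_idx;

    static struct page_info *alloc_pages_sketch(unsigned int order)
    {
        struct page_info *pg;
        unsigned long i;

        pthread_spin_lock(&heap_lock);

        /* Shared allocator bookkeeping: must stay under the lock. */
        if ( first_free_idx + (1UL << order) > (1UL << SKETCH_ORDER) )
        {
            pthread_spin_unlock(&heap_lock);
            return NULL;
        }
        pg = &frame_table[first_free_idx];
        first_free_idx += 1UL << order;

        pthread_spin_unlock(&heap_lock);    /* unlock moved before the loop */

        /*
         * Per-page initialisation: touches only the pages just handed out,
         * so it no longer blocks other CPUs spinning on heap_lock.
         */
        for ( i = 0; i < (1UL << order); i++ )
            pg[i].count_info = 0;

        return pg;
    }

    int main(void)
    {
        pthread_spin_init(&heap_lock, PTHREAD_PROCESS_PRIVATE);

        struct page_info *pg = alloc_pages_sketch(SKETCH_ORDER);
        if ( pg )
            printf("initialised %lu pages starting at %p\n",
                   1UL << SKETCH_ORDER, (void *)pg);

        pthread_spin_destroy(&heap_lock);
        return 0;
    }

The rcu_unlock_domain() change in do_memory_op() applies the same idea
of shortening the critical section around the potentially long-running
reservation operations.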