@@ -485,6 +485,9 @@ static unsigned long node_need_scrub[MAX_NUMNODES];
static unsigned long *avail[MAX_NUMNODES];
static long total_avail_pages;
+/* Per-node counts of free pages */
+static unsigned long pernode_avail_pages[MAX_NUMNODES];
+
static DEFINE_SPINLOCK(heap_lock);
static long outstanding_claims; /* total outstanding claims by all domains */
@@ -1033,6 +1036,7 @@ static struct page_info *alloc_heap_pages(
ASSERT(avail[node][zone] >= request);
avail[node][zone] -= request;
+ pernode_avail_pages[node] -= request;
total_avail_pages -= request;
ASSERT(total_avail_pages >= 0);
@@ -1191,6 +1195,8 @@ static int reserve_offlined_page(struct page_info *head)
continue;
avail[node][zone]--;
+ ASSERT(pernode_avail_pages[node] > 0);
+ pernode_avail_pages[node]--;
total_avail_pages--;
ASSERT(total_avail_pages >= 0);
@@ -1515,6 +1521,7 @@ static void free_heap_pages(
}
avail[node][zone] += 1 << order;
+ pernode_avail_pages[node] += 1 << order;
total_avail_pages += 1 << order;
if ( need_scrub )
{
These are effectively the per-node sums of free memory across all zones of each node. Maintaining them here is an optimization so that follow-up patches, which introduce exact-node claims, don't have to recompute that summation frequently.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
---
 xen/common/page_alloc.c | 7 +++++++
 1 file changed, 7 insertions(+)
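For context, a minimal sketch of the per-zone walk that pernode_avail_pages[] caches, assuming the NR_ZONES bound and the avail[][] layout already used in page_alloc.c; the helper name sum_node_avail_pages() is hypothetical and not part of the patch:

    /*
     * Illustrative only: the summation that pernode_avail_pages[node]
     * keeps up to date incrementally.  Assumes avail[node] has NR_ZONES
     * entries, as elsewhere in page_alloc.c.
     */
    static unsigned long sum_node_avail_pages(unsigned int node)
    {
        unsigned long sum = 0;
        unsigned int zone;

        for ( zone = 0; zone < NR_ZONES; zone++ )
            sum += avail[node][zone];

        return sum;
    }

Caching the sum at the points where avail[node][zone] already changes (alloc, free, offline-reservation) keeps the cost of a per-node availability check O(1) instead of O(NR_ZONES) on every claim-related query.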