
[v10,08/12] xen/page_alloc: introduce preserved page flags macro

Message ID 20241119141329.44221-9-carlo.nonato@minervasys.tech (mailing list archive)
State New
Series Arm cache coloring

Commit Message

Carlo Nonato Nov. 19, 2024, 2:13 p.m. UTC
PGC_static, PGC_extra and PGC_broken need to be preserved when assigning a
page. Define a new macro, PGC_preserved, that groups those flags and use it
instead of OR'ing the individual flags at every use site.
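
For reference, the new grouping and the masking pattern that replaces the
open-coded OR (condensed from the assign_pages() hunk in the diff below):

    #define PGC_preserved (PGC_extra | PGC_static | PGC_broken)

    pg[i].count_info = (pg[i].count_info & PGC_preserved) | PGC_allocated | 1;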

To make the preserved flags even more meaningful, they are also kept when
switching state in mark_page_free().
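
For instance, the in-use case now only drops the non-preserved bits when the
page goes back to the free state (condensed from the mark_page_free() hunk
below):

    case PGC_state_inuse:
        BUG_ON(pg->count_info & PGC_broken);
        pg->count_info = PGC_state_free | (pg->count_info & PGC_preserved);
        break;
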
Enforce the removal of PGC_extra before freeing domain pages, since freeing a
page with PGC_extra still set is considered an error and can trigger ASSERT
violations.
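
For illustration, the error path in alloc_domheap_pages() now clears PGC_extra
before handing pages back to the heap (condensed from the diff below; per the
v9 note, only MEMF_no_refcount allocations carry PGC_extra at this point):

    if ( memflags & MEMF_no_refcount )
    {
        unsigned long i;

        /* Drop PGC_extra before free_heap_pages(); leaving it set would
         * trip an ASSERT on the free path. */
        for ( i = 0; i < (1UL << order); i++ )
            pg[i].count_info &= ~PGC_extra;
    }

    free_heap_pages(pg, order, memflags & MEMF_no_scrub);
    return NULL;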

Signed-off-by: Carlo Nonato <carlo.nonato@minervasys.tech>
---
v10:
- fixed commit message
v9:
- add PGC_broken to PGC_preserved
- clear PGC_extra in alloc_domheap_pages() only if MEMF_no_refcount is set
v8:
- fixed PGC_extra ASSERT fail in alloc_domheap_pages() by removing PGC_extra
  before freeing
v7:
- PGC_preserved used also in mark_page_free()
v6:
- preserved_flags renamed to PGC_preserved
- PGC_preserved is used only in assign_pages()
v5:
- new patch
---
 xen/common/page_alloc.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

Patch

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7b911b5ed9..34cd473150 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -160,6 +160,7 @@ 
 #endif
 
 #define PGC_no_buddy_merge PGC_static
+#define PGC_preserved (PGC_extra | PGC_static | PGC_broken)
 
 #ifndef PGT_TYPE_INFO_INITIALIZER
 #define PGT_TYPE_INFO_INITIALIZER 0
@@ -1427,12 +1428,11 @@  static bool mark_page_free(struct page_info *pg, mfn_t mfn)
     {
     case PGC_state_inuse:
         BUG_ON(pg->count_info & PGC_broken);
-        pg->count_info = PGC_state_free;
+        pg->count_info = PGC_state_free | (pg->count_info & PGC_preserved);
         break;
 
     case PGC_state_offlining:
-        pg->count_info = (pg->count_info & PGC_broken) |
-                         PGC_state_offlined;
+        pg->count_info = (pg->count_info & PGC_preserved) | PGC_state_offlined;
         pg_offlined = true;
         break;
 
@@ -2366,7 +2366,7 @@  int assign_pages(
 
         for ( i = 0; i < nr; i++ )
         {
-            ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_static)));
+            ASSERT(!(pg[i].count_info & ~PGC_preserved));
             if ( pg[i].count_info & PGC_extra )
                 extra_pages++;
         }
@@ -2426,7 +2426,7 @@  int assign_pages(
         page_set_owner(&pg[i], d);
         smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
         pg[i].count_info =
-            (pg[i].count_info & (PGC_extra | PGC_static)) | PGC_allocated | 1;
+            (pg[i].count_info & PGC_preserved) | PGC_allocated | 1;
 
         page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
     }
@@ -2485,6 +2485,14 @@  struct page_info *alloc_domheap_pages(
         }
         if ( assign_page(pg, order, d, memflags) )
         {
+            if ( memflags & MEMF_no_refcount )
+            {
+                unsigned long i;
+
+                for ( i = 0; i < (1UL << order); i++ )
+                    pg[i].count_info &= ~PGC_extra;
+            }
+
             free_heap_pages(pg, order, memflags & MEMF_no_scrub);
             return NULL;
         }
@@ -2539,6 +2547,7 @@  void free_domheap_pages(struct page_info *pg, unsigned int order)
                 {
                     ASSERT(d->extra_pages);
                     d->extra_pages--;
+                    pg[i].count_info &= ~PGC_extra;
                 }
             }