diff mbox series

[v3] page_alloc: assert IRQs are enabled in heap alloc/free

Message ID 20220422153601.967318-1-dvrabel@cantab.net (mailing list archive)
State Superseded
Headers show
Series [v3] page_alloc: assert IRQs are enabled in heap alloc/free | expand

Commit Message

David Vrabel April 22, 2022, 3:36 p.m. UTC
From: David Vrabel <dvrabel@amazon.co.uk>

Heap pages can only be safely allocated and freed with interrupts
enabled as they may require a TLB flush which will send IPIs.

Normally spinlock debugging would catch calls from the incorrect
context, but not from stop_machine_run() action functions as these are
called with spin lock debugging disabled.

Enhance the assertions in alloc_xenheap_pages() and
alloc_domheap_pages() to check interrupts are enabled. For consistency
the same asserts are used when freeing heap pages.

As an exception, when only 1 PCPU is online, allocations are permitted
with interrupts disabled as any TLB flushes would be local only. This
is necessary during early boot.

Signed-off-by: David Vrabel <dvrabel@amazon.co.uk>
---
Changes in v3:
- Use num_online_cpus() in assert.

Changes in v2:
- Set SYS_STATE_smp_boot on arm.
---
 xen/common/page_alloc.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

Comments

Julien Grall April 24, 2022, 3:52 p.m. UTC | #1
Hi David,

On 22/04/2022 16:36, David Vrabel wrote:
> From: David Vrabel <dvrabel@amazon.co.uk>
> 
> Heap pages can only be safely allocated and freed with interuupts

typo: s/interuupts/interrupts/

> enabled as they may require a TLB flush which will send IPIs.

We don't have such requirements on Arm. Given this is common code, I 
think we should write "which may send IPIs on some architectures (such 
as x86)".

That said, I think the change is still a good move on Arm because I 
don't think it is sane to do allocation with interrupts disabled.

> 
> Normally spinlock debugging would catch calls from the incorrect
> context, but not from stop_machine_run() action functions as these are
> called with spin lock debugging disabled.
> 
> Enhance the assertions in alloc_xenheap_pages() and
> alloc_domheap_pages() to check interrupts are enabled. For consistency
> the same asserts are used when freeing heap pages.
> 
> As an exception, when only 1 PCPU is online, allocations are permitted
> with interrupts disabled as any TLB flushes would be local only. This
> is necessary during early boot.
> 
> Signed-off-by: David Vrabel <dvrabel@amazon.co.uk>
> ---
> Changes in v3:
> - Use num_online_cpus() in assert.
> 
> Changes in v2:
> - Set SYS_STATE_smp_boot on arm.
> ---
>   xen/common/page_alloc.c | 23 +++++++++++++++--------
>   1 file changed, 15 insertions(+), 8 deletions(-)
> 
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index 319029140f..516ffa2a97 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -162,6 +162,13 @@
>   static char __initdata opt_badpage[100] = "";
>   string_param("badpage", opt_badpage);
>   
> +/*
> + * Heap allocations may need TLB flushes which require IRQs to be
> + * enabled (except during early boot when only 1 PCPU is online).

Same remark as above. Also, I think there are other cases where 
num_online_cpus() == 1:
   - Xen is only using one core (it will not be a useful system but 
technically supported)
   - During suspend/resume

So I think we should either relax the comment or restrict the assert 
below. I don't have any preference.

> + */
> +#define ASSERT_ALLOC_CONTEXT() \
> +    ASSERT(!in_irq() && (local_irq_is_enabled() || num_online_cpus() == 1))
> +
>   /*
>    * no-bootscrub -> Free pages are not zeroed during boot.
>    */
> @@ -2160,7 +2167,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
>   {
>       struct page_info *pg;
>   
> -    ASSERT(!in_irq());
> +    ASSERT_ALLOC_CONTEXT();
>   
>       pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN,
>                             order, memflags | MEMF_no_scrub, NULL);
> @@ -2173,7 +2180,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
>   
>   void free_xenheap_pages(void *v, unsigned int order)
>   {
> -    ASSERT(!in_irq());
> +    ASSERT_ALLOC_CONTEXT();
>   
>       if ( v == NULL )
>           return;
> @@ -2202,7 +2209,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
>       struct page_info *pg;
>       unsigned int i;
>   
> -    ASSERT(!in_irq());
> +    ASSERT_ALLOC_CONTEXT();
>   
>       if ( xenheap_bits && (memflags >> _MEMF_bits) > xenheap_bits )
>           memflags &= ~MEMF_bits(~0U);
> @@ -2224,7 +2231,7 @@ void free_xenheap_pages(void *v, unsigned int order)
>       struct page_info *pg;
>       unsigned int i;
>   
> -    ASSERT(!in_irq());
> +    ASSERT_ALLOC_CONTEXT();
>   
>       if ( v == NULL )
>           return;
> @@ -2249,7 +2256,7 @@ void init_domheap_pages(paddr_t ps, paddr_t pe)
>   {
>       mfn_t smfn, emfn;
>   
> -    ASSERT(!in_irq());
> +    ASSERT_ALLOC_CONTEXT();
>   
>       smfn = maddr_to_mfn(round_pgup(ps));
>       emfn = maddr_to_mfn(round_pgdown(pe));
> @@ -2369,7 +2376,7 @@ struct page_info *alloc_domheap_pages(
>       unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
>       unsigned int dma_zone;
>   
> -    ASSERT(!in_irq());
> +    ASSERT_ALLOC_CONTEXT();
>   
>       bits = domain_clamp_alloc_bitsize(memflags & MEMF_no_owner ? NULL : d,
>                                         bits ? : (BITS_PER_LONG+PAGE_SHIFT));
> @@ -2419,7 +2426,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
>       unsigned int i;
>       bool drop_dom_ref;
>   
> -    ASSERT(!in_irq());
> +    ASSERT_ALLOC_CONTEXT();
>   
>       if ( unlikely(is_xen_heap_page(pg)) )
>       {
> @@ -2738,7 +2745,7 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
>   {
>       struct page_info *pg;
>   
> -    ASSERT(!in_irq());
> +    ASSERT_ALLOC_CONTEXT();
>   
>       pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
>       if ( !pg )

Cheers,
Jan Beulich April 25, 2022, 7:56 a.m. UTC | #2
On 24.04.2022 17:52, Julien Grall wrote:
>> Changes in v3:
>> - Use num_online_cpus() in assert.

With this ...

>> --- a/xen/common/page_alloc.c
>> +++ b/xen/common/page_alloc.c
>> @@ -162,6 +162,13 @@
>>   static char __initdata opt_badpage[100] = "";
>>   string_param("badpage", opt_badpage);
>>   
>> +/*
>> + * Heap allocations may need TLB flushes which require IRQs to be
>> + * enabled (except during early boot when only 1 PCPU is online).
> 
> Same remark as above. Also, I think there are other cases where 
> num_online_cpus() == 1:
>    - Xen is only using one core (it will not be a useful system but 
> technically supported)
>    - During suspend/resume
> 
> So I think we should either relax the comment or restrict the assert 
> below. I don't have any preference.

... I think it is the comment which wants bringing back in sync.

> + */
> +#define ASSERT_ALLOC_CONTEXT() \
> +    ASSERT(!in_irq() && (local_irq_is_enabled() || num_online_cpus() == 1))

While by the time calls here can legitimately occur the online map
should be initialized, I wonder whether it wouldn't be better to use
"<= 1" here nevertheless.

Jan
diff mbox series

Patch

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 319029140f..516ffa2a97 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -162,6 +162,13 @@ 
 static char __initdata opt_badpage[100] = "";
 string_param("badpage", opt_badpage);
 
+/*
+ * Heap allocations may need TLB flushes which require IRQs to be
+ * enabled (except during early boot when only 1 PCPU is online).
+ */
+#define ASSERT_ALLOC_CONTEXT() \
+    ASSERT(!in_irq() && (local_irq_is_enabled() || num_online_cpus() == 1))
+
 /*
  * no-bootscrub -> Free pages are not zeroed during boot.
  */
@@ -2160,7 +2167,7 @@  void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
 {
     struct page_info *pg;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     pg = alloc_heap_pages(MEMZONE_XEN, MEMZONE_XEN,
                           order, memflags | MEMF_no_scrub, NULL);
@@ -2173,7 +2180,7 @@  void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
 
 void free_xenheap_pages(void *v, unsigned int order)
 {
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     if ( v == NULL )
         return;
@@ -2202,7 +2209,7 @@  void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
     struct page_info *pg;
     unsigned int i;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     if ( xenheap_bits && (memflags >> _MEMF_bits) > xenheap_bits )
         memflags &= ~MEMF_bits(~0U);
@@ -2224,7 +2231,7 @@  void free_xenheap_pages(void *v, unsigned int order)
     struct page_info *pg;
     unsigned int i;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     if ( v == NULL )
         return;
@@ -2249,7 +2256,7 @@  void init_domheap_pages(paddr_t ps, paddr_t pe)
 {
     mfn_t smfn, emfn;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     smfn = maddr_to_mfn(round_pgup(ps));
     emfn = maddr_to_mfn(round_pgdown(pe));
@@ -2369,7 +2376,7 @@  struct page_info *alloc_domheap_pages(
     unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
     unsigned int dma_zone;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     bits = domain_clamp_alloc_bitsize(memflags & MEMF_no_owner ? NULL : d,
                                       bits ? : (BITS_PER_LONG+PAGE_SHIFT));
@@ -2419,7 +2426,7 @@  void free_domheap_pages(struct page_info *pg, unsigned int order)
     unsigned int i;
     bool drop_dom_ref;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     if ( unlikely(is_xen_heap_page(pg)) )
     {
@@ -2738,7 +2745,7 @@  int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
 {
     struct page_info *pg;
 
-    ASSERT(!in_irq());
+    ASSERT_ALLOC_CONTEXT();
 
     pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
     if ( !pg )