Message ID | 20230523140002.799808756@linutronix.de (mailing list archive)
---|---
State | New
Series | mm/vmalloc: Assorted fixes and improvements

purge_fragmented_blocks() accesses vmap_block::free and vmap_block::dirty
lockless for a quick check.

Add the missing READ/WRITE_ONCE() annotations.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

```diff
---
 mm/vmalloc.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2093,9 +2093,9 @@ static bool purge_fragmented_block(struc
 		return false;
 
 	/* prevent further allocs after releasing lock */
-	vb->free = 0;
+	WRITE_ONCE(vb->free, 0);
 	/* prevent purging it again */
-	vb->dirty = VMAP_BBMAP_BITS;
+	WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
 	vb->dirty_min = 0;
 	vb->dirty_max = VMAP_BBMAP_BITS;
 	spin_lock(&vbq->lock);
@@ -2123,7 +2123,10 @@ static void purge_fragmented_blocks(int
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
+		unsigned long free = READ_ONCE(vb->free);
+		unsigned long dirty = READ_ONCE(vb->dirty);
+
+		if (!(free + dirty == VMAP_BBMAP_BITS && dirty != VMAP_BBMAP_BITS))
 			continue;
 
 		spin_lock(&vb->lock);
@@ -2231,7 +2234,7 @@ static void vb_free(unsigned long addr,
 	vb->dirty_min = min(vb->dirty_min, offset);
 	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
 
-	vb->dirty += 1UL << order;
+	WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
 	if (vb->dirty == VMAP_BBMAP_BITS) {
 		BUG_ON(vb->free);
 		spin_unlock(&vb->lock);
```

On Tue, May 23, 2023 at 04:02:15PM +0200, Thomas Gleixner wrote:
> purge_fragmented_blocks() accesses vmap_block::free and vmap_block::dirty
> lockless for a quick check.
>
> Add the missing READ/WRITE_ONCE() annotations.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
>  mm/vmalloc.c | 11 +++++++----
>  1 file changed, 7 insertions(+), 4 deletions(-)
>
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2093,9 +2093,9 @@ static bool purge_fragmented_block(struc
>  		return false;
>
>  	/* prevent further allocs after releasing lock */
> -	vb->free = 0;
> +	WRITE_ONCE(vb->free, 0);
>  	/* prevent purging it again */
> -	vb->dirty = VMAP_BBMAP_BITS;
> +	WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
>  	vb->dirty_min = 0;
>  	vb->dirty_max = VMAP_BBMAP_BITS;
>  	spin_lock(&vbq->lock);
> @@ -2123,7 +2123,10 @@ static void purge_fragmented_blocks(int
>
>  	rcu_read_lock();
>  	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
> -		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
> +		unsigned long free = READ_ONCE(vb->free);
> +		unsigned long dirty = READ_ONCE(vb->dirty);
> +
> +		if (!(free + dirty == VMAP_BBMAP_BITS && dirty != VMAP_BBMAP_BITS))
>  			continue;
>
>  		spin_lock(&vb->lock);
> @@ -2231,7 +2234,7 @@ static void vb_free(unsigned long addr,
>  	vb->dirty_min = min(vb->dirty_min, offset);
>  	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
>
> -	vb->dirty += 1UL << order;
> +	WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
>  	if (vb->dirty == VMAP_BBMAP_BITS) {
>  		BUG_ON(vb->free);
>  		spin_unlock(&vb->lock);
>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

--
Uladzislau Rezki
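For readers less familiar with the idiom the patch enforces, the sketch below shows the general shape of a lockless quick check: the writer publishes counter updates with WRITE_ONCE() while holding a lock, and the reader takes an unlocked snapshot with READ_ONCE(), only acquiring the lock when the snapshot suggests there is work to do. This is a minimal userspace approximation, not the kernel code: it assumes a pthread mutex in place of the vmap_block spinlock, approximates READ_ONCE()/WRITE_ONCE() with volatile casts rather than the kernel's definitions, and uses an illustrative `struct block` instead of the real `vmap_block` layout.

```c
/*
 * Userspace sketch of a lockless quick check with READ_ONCE()/WRITE_ONCE().
 * The macros, struct and values are simplified stand-ins, not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

#define VMAP_BBMAP_BITS 1024UL

/* Simplified approximations of the kernel macros: force a single,
 * untorn access through a volatile-qualified pointer. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

struct block {
	pthread_mutex_t lock;
	unsigned long free;
	unsigned long dirty;
};

/* Writer side: modify the counters under the lock, but use WRITE_ONCE()
 * because lockless readers may load them concurrently. */
static void mark_fully_dirty(struct block *b)
{
	pthread_mutex_lock(&b->lock);
	WRITE_ONCE(b->free, 0);
	WRITE_ONCE(b->dirty, VMAP_BBMAP_BITS);
	pthread_mutex_unlock(&b->lock);
}

/* Reader side: lockless quick check first, then re-check under the lock
 * before acting, since the snapshot may already be stale. */
static int try_purge(struct block *b)
{
	unsigned long free = READ_ONCE(b->free);
	unsigned long dirty = READ_ONCE(b->dirty);

	/* Not fully fragmented, or already purged: skip without locking. */
	if (!(free + dirty == VMAP_BBMAP_BITS && dirty != VMAP_BBMAP_BITS))
		return 0;

	pthread_mutex_lock(&b->lock);
	if (b->free + b->dirty == VMAP_BBMAP_BITS && b->dirty != VMAP_BBMAP_BITS) {
		/* ... purge the block here ... */
		pthread_mutex_unlock(&b->lock);
		return 1;
	}
	pthread_mutex_unlock(&b->lock);
	return 0;
}

int main(void)
{
	struct block b = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.free = 512,
		.dirty = 512,
	};

	printf("quick check says purge: %d\n", try_purge(&b));
	mark_fully_dirty(&b);
	printf("after marking fully dirty: %d\n", try_purge(&b));
	return 0;
}
```

Note that the unlocked snapshot can be stale by the time the lock is taken, which is why try_purge() re-evaluates the condition under the mutex before acting. The annotations only make the unlocked loads and the concurrent stores well-defined single accesses; they do not make the quick check authoritative.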