[v3,1/7] mm/vmalloc.c: add used_map into vmap_block to track space of vmap_block

Message ID 20230113031921.64716-2-bhe@redhat.com (mailing list archive)
State New
Series mm/vmalloc.c: allow vread() to read out vm_map_ram areas

Commit Message

Baoquan He Jan. 13, 2023, 3:19 a.m. UTC
In one vmap_block area, there can be three types of regions: used
regions allocated through vb_alloc(), dirty regions freed via
vb_free(), and free regions. Among them, only the used regions contain
valid data. However, there is currently no way to track those used
regions.

Add a bitmap field, used_map, to struct vmap_block, and set/clear its
bits when regions of the vmap_block area are allocated or freed.

This is a preparation for later use.

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/vmalloc.c | 7 +++++++
 1 file changed, 7 insertions(+)
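
To make the purpose concrete, below is a rough sketch (not code from this
series) of how a later consumer, such as the vread() support for
vm_map_ram areas this series builds toward, might walk the used regions
recorded in used_map. The helper name and loop shape are illustrative
assumptions, not the actual implementation:

/*
 * Illustrative sketch only, not part of this patch: scan used_map for
 * runs of set bits, each run being one region handed out by vb_alloc()
 * and not yet released via vb_free().  Offsets are in pages within the
 * vmap_block.  vb->lock serializes against vb_alloc()/vb_free()
 * updating the bitmap.
 */
static void walk_used_regions(struct vmap_block *vb)
{
	unsigned long start, end = 0;

	spin_lock(&vb->lock);
	for (;;) {
		start = find_next_bit(vb->used_map, VMAP_BBMAP_BITS, end);
		if (start >= VMAP_BBMAP_BITS)
			break;
		end = find_next_zero_bit(vb->used_map, VMAP_BBMAP_BITS, start);
		/* [start, end) is one used region; read/copy it here */
	}
	spin_unlock(&vb->lock);
}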

Comments

Uladzislau Rezki Jan. 16, 2023, 11:39 a.m. UTC | #1
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>

--
Uladzislau Rezki
Lorenzo Stoakes Jan. 16, 2023, 12:22 p.m. UTC | #2

Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 428e0bee5c9c..d6ff058ef4d0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1922,6 +1922,7 @@  struct vmap_block {
 	spinlock_t lock;
 	struct vmap_area *va;
 	unsigned long free, dirty;
+	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
 	unsigned long dirty_min, dirty_max; /*< dirty range */
 	struct list_head free_list;
 	struct rcu_head rcu_head;
@@ -1998,10 +1999,12 @@  static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 	vb->va = va;
 	/* At least something should be left free */
 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
+	bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
 	vb->dirty = 0;
 	vb->dirty_min = VMAP_BBMAP_BITS;
 	vb->dirty_max = 0;
+	bitmap_set(vb->used_map, 0, (1UL << order));
 	INIT_LIST_HEAD(&vb->free_list);
 
 	vb_idx = addr_to_vb_idx(va->va_start);
@@ -2111,6 +2114,7 @@  static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 		pages_off = VMAP_BBMAP_BITS - vb->free;
 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
 		vb->free -= 1UL << order;
+		bitmap_set(vb->used_map, pages_off, (1UL << order));
 		if (vb->free == 0) {
 			spin_lock(&vbq->lock);
 			list_del_rcu(&vb->free_list);
@@ -2144,6 +2148,9 @@  static void vb_free(unsigned long addr, unsigned long size)
 	order = get_order(size);
 	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
 	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
+	spin_lock(&vb->lock);
+	bitmap_clear(vb->used_map, offset, (1UL << order));
+	spin_unlock(&vb->lock);
 
 	vunmap_range_noflush(addr, addr + size);
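
A note on the locking visible above: used_map is initialised in
new_vmap_block() before the block is published, and is modified in
vb_alloc() and vb_free() with vb->lock held (in vb_alloc() the lock is
taken earlier in the function, outside the hunk shown). Any later reader
of the bitmap therefore needs to hold vb->lock as well. A hypothetical
helper, purely for illustration:

/*
 * Hypothetical helper, not part of the patch: report whether the page
 * at pg_off inside a vmap_block is currently in a used region, i.e.
 * handed out by vb_alloc() and not yet freed by vb_free().  Caller
 * must hold vb->lock so the answer cannot change underneath it.
 */
static bool vb_page_is_used(struct vmap_block *vb, unsigned long pg_off)
{
	return test_bit(pg_off, vb->used_map);
}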