
[2/2] mm: vmalloc: Refactor vmalloc_dump_obj() function

Message ID 20240124180920.50725-2-urezki@gmail.com
State New
Series [1/2] mm: vmalloc: Improve description of vmap node layer

Commit Message

Uladzislau Rezki Jan. 24, 2024, 6:09 p.m. UTC
Simplify the vmalloc_dump_obj() function by removing the
extra on-stack "objp" variable and by returning to an
early-exit approach when spin_trylock() fails or the VA
is not found.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 mm/vmalloc.c | 33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)
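
For reference, this is how vmalloc_dump_obj() reads with the patch
applied (assembled from the hunk below; nothing here beyond what the
patch itself adds):

#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
	const void *caller;
	struct vm_struct *vm;
	struct vmap_area *va;
	struct vmap_node *vn;
	unsigned long addr;
	unsigned int nr_pages;

	/* Page-align the object and pick the vmap node that covers it. */
	addr = PAGE_ALIGN((unsigned long) object);
	vn = addr_to_node(addr);

	/* Early exit: never wait on the busy lock from a dump path. */
	if (!spin_trylock(&vn->busy.lock))
		return false;

	va = __find_vmap_area(addr, &vn->busy.root);
	if (!va || !va->vm) {
		spin_unlock(&vn->busy.lock);
		return false;
	}

	/* Snapshot what is needed for the printout, then drop the lock. */
	vm = va->vm;
	addr = (unsigned long) vm->addr;
	caller = vm->caller;
	nr_pages = vm->nr_pages;
	spin_unlock(&vn->busy.lock);

	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		nr_pages, addr, caller);

	return true;
}
#endif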

Comments

Lorenzo Stoakes Jan. 30, 2024, 6:50 p.m. UTC | #1
On Wed, Jan 24, 2024 at 07:09:20PM +0100, Uladzislau Rezki (Sony) wrote:
> Simplify the vmalloc_dump_obj() function by removing the
> extra on-stack "objp" variable and by returning to an
> early-exit approach when spin_trylock() fails or the VA
> is not found.
>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
>  mm/vmalloc.c | 33 +++++++++++++++++----------------
>  1 file changed, 17 insertions(+), 16 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index b8be601b056d..449f45b0e474 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -4696,34 +4696,35 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
>  #ifdef CONFIG_PRINTK
>  bool vmalloc_dump_obj(void *object)
>  {
> -	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
>  	const void *caller;
> +	struct vm_struct *vm;
>  	struct vmap_area *va;
>  	struct vmap_node *vn;
>  	unsigned long addr;
>  	unsigned int nr_pages;
> -	bool success = false;
> -
> -	vn = addr_to_node((unsigned long)objp);
>
> -	if (spin_trylock(&vn->busy.lock)) {
> -		va = __find_vmap_area((unsigned long)objp, &vn->busy.root);
> +	addr = PAGE_ALIGN((unsigned long) object);
> +	vn = addr_to_node(addr);
>
> -		if (va && va->vm) {
> -			addr = (unsigned long)va->vm->addr;
> -			caller = va->vm->caller;
> -			nr_pages = va->vm->nr_pages;
> -			success = true;
> -		}
> +	if (!spin_trylock(&vn->busy.lock))
> +		return false;
>
> +	va = __find_vmap_area(addr, &vn->busy.root);
> +	if (!va || !va->vm) {
>  		spin_unlock(&vn->busy.lock);
> +		return false;
>  	}
>
> -	if (success)
> -		pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
> -			nr_pages, addr, caller);
> +	vm = va->vm;
> +	addr = (unsigned long) vm->addr;

Hmm not so nice to reuse addr here for something different, might be nice
to have separate obj_addr and vm_addr or something. But it's not critical!
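
Something like this is what I had in mind (just a sketch of the naming,
not tested):

	unsigned long obj_addr, vm_addr;

	obj_addr = PAGE_ALIGN((unsigned long) object);
	vn = addr_to_node(obj_addr);

	if (!spin_trylock(&vn->busy.lock))
		return false;

	va = __find_vmap_area(obj_addr, &vn->busy.root);
	if (!va || !va->vm) {
		spin_unlock(&vn->busy.lock);
		return false;
	}

	vm = va->vm;
	/* vm_addr is the start of the vm area, not the looked-up object. */
	vm_addr = (unsigned long) vm->addr;
	caller = vm->caller;
	nr_pages = vm->nr_pages;
	spin_unlock(&vn->busy.lock);

	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		nr_pages, vm_addr, caller);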

> +	caller = vm->caller;
> +	nr_pages = vm->nr_pages;
> +	spin_unlock(&vn->busy.lock);
>
> -	return success;
> +	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
> +		nr_pages, addr, caller);
> +
> +	return true;
>  }
>  #endif
>
> --
> 2.39.2
>

Other than the nit, which I don't insist on, this is a big improvement so,

Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Uladzislau Rezki Jan. 31, 2024, 9:49 a.m. UTC | #2
On Tue, Jan 30, 2024 at 06:50:48PM +0000, Lorenzo Stoakes wrote:
> On Wed, Jan 24, 2024 at 07:09:20PM +0100, Uladzislau Rezki (Sony) wrote:
> > Simplify the vmalloc_dump_obj() function by removing the
> > extra on-stack "objp" variable and by returning to an
> > early-exit approach when spin_trylock() fails or the VA
> > is not found.
> >
> > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > ---
> >  mm/vmalloc.c | 33 +++++++++++++++++----------------
> >  1 file changed, 17 insertions(+), 16 deletions(-)
> >
> > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > index b8be601b056d..449f45b0e474 100644
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -4696,34 +4696,35 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
> >  #ifdef CONFIG_PRINTK
> >  bool vmalloc_dump_obj(void *object)
> >  {
> > -	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
> >  	const void *caller;
> > +	struct vm_struct *vm;
> >  	struct vmap_area *va;
> >  	struct vmap_node *vn;
> >  	unsigned long addr;
> >  	unsigned int nr_pages;
> > -	bool success = false;
> > -
> > -	vn = addr_to_node((unsigned long)objp);
> >
> > -	if (spin_trylock(&vn->busy.lock)) {
> > -		va = __find_vmap_area((unsigned long)objp, &vn->busy.root);
> > +	addr = PAGE_ALIGN((unsigned long) object);
> > +	vn = addr_to_node(addr);
> >
> > -		if (va && va->vm) {
> > -			addr = (unsigned long)va->vm->addr;
> > -			caller = va->vm->caller;
> > -			nr_pages = va->vm->nr_pages;
> > -			success = true;
> > -		}
> > +	if (!spin_trylock(&vn->busy.lock))
> > +		return false;
> >
> > +	va = __find_vmap_area(addr, &vn->busy.root);
> > +	if (!va || !va->vm) {
> >  		spin_unlock(&vn->busy.lock);
> > +		return false;
> >  	}
> >
> > -	if (success)
> > -		pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
> > -			nr_pages, addr, caller);
> > +	vm = va->vm;
> > +	addr = (unsigned long) vm->addr;
> 
> Hmm not so nice to reuse addr here for something different, might be nice
> to have separate obj_addr and vm_addr or something. But it's not critical!
> 
> > +	caller = vm->caller;
> > +	nr_pages = vm->nr_pages;
> > +	spin_unlock(&vn->busy.lock);
> >
> > -	return success;
> > +	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
> > +		nr_pages, addr, caller);
> > +
> > +	return true;
> >  }
> >  #endif
> >
> > --
> > 2.39.2
> >
> 
> Other than the nit, which I don't insist on, this is a big improvement so,
> 
> Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
>

Thanks!

--
Uladzislau Rezki

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b8be601b056d..449f45b0e474 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4696,34 +4696,35 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 #ifdef CONFIG_PRINTK
 bool vmalloc_dump_obj(void *object)
 {
-	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
 	const void *caller;
+	struct vm_struct *vm;
 	struct vmap_area *va;
 	struct vmap_node *vn;
 	unsigned long addr;
 	unsigned int nr_pages;
-	bool success = false;
-
-	vn = addr_to_node((unsigned long)objp);
 
-	if (spin_trylock(&vn->busy.lock)) {
-		va = __find_vmap_area((unsigned long)objp, &vn->busy.root);
+	addr = PAGE_ALIGN((unsigned long) object);
+	vn = addr_to_node(addr);
 
-		if (va && va->vm) {
-			addr = (unsigned long)va->vm->addr;
-			caller = va->vm->caller;
-			nr_pages = va->vm->nr_pages;
-			success = true;
-		}
+	if (!spin_trylock(&vn->busy.lock))
+		return false;
 
+	va = __find_vmap_area(addr, &vn->busy.root);
+	if (!va || !va->vm) {
 		spin_unlock(&vn->busy.lock);
+		return false;
 	}
 
-	if (success)
-		pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
-			nr_pages, addr, caller);
+	vm = va->vm;
+	addr = (unsigned long) vm->addr;
+	caller = vm->caller;
+	nr_pages = vm->nr_pages;
+	spin_unlock(&vn->busy.lock);
 
-	return success;
+	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
+		nr_pages, addr, caller);
+
+	return true;
 }
 #endif
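
For context (not part of this patch): the boolean return value is what
lets the caller fall back to other descriptions when nothing can be
reported here, e.g. when the trylock fails or the address is not a
tracked vmalloc region. The caller is mem_dump_obj() in mm/util.c; a
rough, simplified sketch of that calling pattern (not verbatim from the
tree) looks like:

void mem_dump_obj(void *object)
{
	/*
	 * Each helper prints a description and returns true only if it
	 * could identify the object; otherwise try the next option.
	 */
	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	/* Fall back to a generic description of the pointer. */
	if (is_vmalloc_addr(object))
		pr_cont(" vmalloc memory\n");
	else if (virt_addr_valid(object))
		pr_cont(" non-slab/vmalloc memory\n");
	else
		pr_cont(" non-paged memory\n");
}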