
[6/9] mm: vmalloc: Support multiple zones in vmallocinfo

Message ID 20230522110849.2921-7-urezki@gmail.com (mailing list archive)
State New
Series Mitigate a vmap lock contention

Commit Message

Uladzislau Rezki May 22, 2023, 11:08 a.m. UTC
The global vmap area busy tree has been removed and replaced by
multiple per-CPU trees/lists, therefore we need to traverse all
per-CPU busy lists in order to dump all allocated objects.
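
For context, the traversal now takes the following shape. This is
only a sketch condensed from the s_show() change below; the fbl_*()
helpers and cpu_vmap_zone come from earlier patches in this series:

  /*
   * Walk every per-CPU zone and dump its BUSY list while
   * holding that zone's own lock.
   */
  struct cpu_vmap_zone *z;
  struct vmap_area *va;
  int i;

  for_each_possible_cpu(i) {
          z = per_cpu_ptr(&cpu_vmap_zone, i);

          fbl_lock(z, BUSY);
          list_for_each_entry(va, &fbl_head(z, BUSY), list)
                  /* dump one allocated area here */;
          fbl_unlock(z, BUSY);
  }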

Please note that, after this patch, the dumped addresses of
allocated areas are no longer sequential. See the example below:

  0   1   2   0   1   2
|---|---|---|---|---|---|-> vmap space

There are 3 CPUs and dumping is done per CPU zone; as you can see,
an address in zone_0 can be ahead of addresses residing in zone_1
or zone_2.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 mm/vmalloc.c | 90 ++++++++++++++++++++++++++--------------------------
 1 file changed, 45 insertions(+), 45 deletions(-)

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a9170fe19909..dd83deb5ef4f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4159,26 +4159,18 @@  bool vmalloc_dump_obj(void *object)
 
 #ifdef CONFIG_PROC_FS
 static void *s_start(struct seq_file *m, loff_t *pos)
-	__acquires(&vmap_purge_lock)
-	__acquires(&fbl(&per_cpu(cpu_vmap_zone, 0), BUSY, lock))
 {
-	mutex_lock(&vmap_purge_lock);
-	fbl_lock((&per_cpu(cpu_vmap_zone, 0)), BUSY);
-
-	return seq_list_start(&fbl_head((&per_cpu(cpu_vmap_zone, 0)), BUSY), *pos);
+	return *pos < 1 ? (void *)1 : NULL;
 }
 
 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-	return seq_list_next(p, &fbl_head((&per_cpu(cpu_vmap_zone, 0)), BUSY), pos);
+	++*pos;
+	return NULL;
 }
 
 static void s_stop(struct seq_file *m, void *p)
-	__releases(&fbl(&per_cpu(cpu_vmap_zone, 0), BUSY, lock))
-	__releases(&vmap_purge_lock)
 {
-	fbl_unlock((&per_cpu(cpu_vmap_zone, 0)), BUSY);
-	mutex_unlock(&vmap_purge_lock);
 }
 
 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
@@ -4209,6 +4201,7 @@  static void show_purge_info(struct seq_file *m)
 {
 	struct vmap_area *va;
 
+	mutex_lock(&vmap_purge_lock);
 	spin_lock(&purge_vmap_area_lock);
 	list_for_each_entry(va, &purge_vmap_area_list, list) {
 		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
@@ -4216,65 +4209,72 @@  static void show_purge_info(struct seq_file *m)
 			va->va_end - va->va_start);
 	}
 	spin_unlock(&purge_vmap_area_lock);
+	mutex_unlock(&vmap_purge_lock);
 }
 
 static int s_show(struct seq_file *m, void *p)
 {
+	struct cpu_vmap_zone *z;
 	struct vmap_area *va;
 	struct vm_struct *v;
+	int i;
 
-	va = list_entry(p, struct vmap_area, list);
+	for_each_possible_cpu(i) {
+		z = per_cpu_ptr(&cpu_vmap_zone, i);
 
-	if (!va->vm) {
-		if (va->flags & VMAP_RAM)
-			seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
-				(void *)va->va_start, (void *)va->va_end,
-				va->va_end - va->va_start);
+		fbl_lock(z, BUSY);
+		list_for_each_entry(va, &fbl_head(z, BUSY), list) {
+			if (!va->vm) {
+				if (va->flags & VMAP_RAM)
+					seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
+						(void *)va->va_start, (void *)va->va_end,
+						va->va_end - va->va_start);
 
-		goto final;
-	}
+				continue;
+			}
 
-	v = va->vm;
+			v = va->vm;
 
-	seq_printf(m, "0x%pK-0x%pK %7ld",
-		v->addr, v->addr + v->size, v->size);
+			seq_printf(m, "0x%pK-0x%pK %7ld",
+			   v->addr, v->addr + v->size, v->size);
 
-	if (v->caller)
-		seq_printf(m, " %pS", v->caller);
+			if (v->caller)
+				seq_printf(m, " %pS", v->caller);
 
-	if (v->nr_pages)
-		seq_printf(m, " pages=%d", v->nr_pages);
+			if (v->nr_pages)
+				seq_printf(m, " pages=%d", v->nr_pages);
 
-	if (v->phys_addr)
-		seq_printf(m, " phys=%pa", &v->phys_addr);
+			if (v->phys_addr)
+				seq_printf(m, " phys=%pa", &v->phys_addr);
 
-	if (v->flags & VM_IOREMAP)
-		seq_puts(m, " ioremap");
+			if (v->flags & VM_IOREMAP)
+				seq_puts(m, " ioremap");
 
-	if (v->flags & VM_ALLOC)
-		seq_puts(m, " vmalloc");
+			if (v->flags & VM_ALLOC)
+				seq_puts(m, " vmalloc");
 
-	if (v->flags & VM_MAP)
-		seq_puts(m, " vmap");
+			if (v->flags & VM_MAP)
+				seq_puts(m, " vmap");
 
-	if (v->flags & VM_USERMAP)
-		seq_puts(m, " user");
+			if (v->flags & VM_USERMAP)
+				seq_puts(m, " user");
 
-	if (v->flags & VM_DMA_COHERENT)
-		seq_puts(m, " dma-coherent");
+			if (v->flags & VM_DMA_COHERENT)
+				seq_puts(m, " dma-coherent");
 
-	if (is_vmalloc_addr(v->pages))
-		seq_puts(m, " vpages");
+			if (is_vmalloc_addr(v->pages))
+				seq_puts(m, " vpages");
 
-	show_numa_info(m, v);
-	seq_putc(m, '\n');
+			show_numa_info(m, v);
+			seq_putc(m, '\n');
+		}
+		fbl_unlock(z, BUSY);
+	}
 
 	/*
 	 * As a final step, dump "unpurged" areas.
 	 */
-final:
-	if (list_is_last(&va->list, &fbl_head((&per_cpu(cpu_vmap_zone, 0)), BUSY)))
-		show_purge_info(m);
+	show_purge_info(m);
 
 	return 0;
 }
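
A side note on the seq_file rework above: since the whole dump is now
produced by a single s_show() invocation, s_start()/s_next() degenerate
into a one-shot iterator. A minimal self-contained sketch of that idiom
(the one_* names here are hypothetical, not from the patch):

  #include <linux/seq_file.h>

  /*
   * One-shot seq_file: everything is printed from one ->show() call.
   * ->start() hands out a single dummy token, ->next() terminates.
   */
  static void *one_start(struct seq_file *m, loff_t *pos)
  {
          return *pos < 1 ? (void *)1 : NULL;
  }

  static void *one_next(struct seq_file *m, void *p, loff_t *pos)
  {
          ++*pos;
          return NULL;
  }

  static void one_stop(struct seq_file *m, void *p)
  {
  }

  static int one_show(struct seq_file *m, void *p)
  {
          seq_puts(m, "entire report is emitted here\n");
          return 0;
  }

  static const struct seq_operations one_seq_ops = {
          .start = one_start,
          .next  = one_next,
          .stop  = one_stop,
          .show  = one_show,
  };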