
[2/2] mm: hugetlb: log time needed to allocate hugepages

Message ID 20250221-hugepage-parameter-v1-2-fa49a77c87c8@cyberus-technology.de
State New
Series Add a command line option that enables control of how many threads per NUMA node should be used to allocate huge pages.

Commit Message

Thomas Prescher via B4 Relay Feb. 21, 2025, 1:49 p.m. UTC
From: Thomas Prescher <thomas.prescher@cyberus-technology.de>

Logging how long the boot-time huge page allocation takes allows users
to easily tune the hugepages_node_threads parameter.

Signed-off-by: Thomas Prescher <thomas.prescher@cyberus-technology.de>
---
 mm/hugetlb.c | 9 +++++++++
 1 file changed, 9 insertions(+)
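
For context, the tuning loop the commit message has in mind would presumably combine the new log line with the per-node thread count from patch 1/2: boot with a huge page reservation and a thread count, then compare the logged allocation times across runs. A sketch of such a kernel command line, assuming the hugepages_node_threads= syntax from that patch; the page size, page count, and thread count below are illustrative only:

    hugepagesz=1G hugepages=512 hugepages_node_threads=4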

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b7d24c41e0f9d22f5b86c253e29a2eca28460026..2aa5724a385494f9d6f1d644a2bfe547591fc96c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3428,6 +3428,9 @@  static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
 		.numa_aware	= true
 	};
 
+	unsigned long jiffies_start;
+	unsigned long jiffies_end;
+
 	job.thread_fn	= hugetlb_pages_alloc_boot_node;
 	job.start	= 0;
 	job.size	= h->max_huge_pages;
@@ -3450,7 +3453,13 @@  static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
 	 */
 	job.max_threads	= num_node_state(N_MEMORY) * allocation_threads_per_node;
 	job.min_chunk	= h->max_huge_pages / num_node_state(N_MEMORY) / allocation_threads_per_node;
+
+	jiffies_start = jiffies;
 	padata_do_multithreaded(&job);
+	jiffies_end = jiffies;
+
+	printk(KERN_DEBUG "HugeTLB: allocation took %ums\n",
+	    jiffies_to_msecs(jiffies_end - jiffies_start));
 
 	return h->nr_huge_pages;
 }
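
With the patch applied, the measurement can be read back from the kernel log once boot completes. Because the message is emitted at KERN_DEBUG, at the default console loglevel it lands in the ring buffer rather than on the console; a sketch of checking it with dmesg, where the timestamp and duration shown are illustrative only:

    $ dmesg | grep 'HugeTLB: allocation'
    [    4.652108] HugeTLB: allocation took 1830ms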