@@ -2230,6 +2230,11 @@ static void prep_and_add_allocated_folios(struct hstate *h,
{
struct folio *folio, *tmp_f;

+ /*
+ * Send list for bulk vmemmap optimization processing
+ */
+ hugetlb_vmemmap_optimize_folios(h, folio_list);
+
/*
* Add all new pool pages to free lists in one lock cycle
*/
@@ -484,6 +484,9 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
/* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
+ if (HPageVmemmapOptimized((struct page *)head))
+ return false;
+
if (!READ_ONCE(vmemmap_optimize_enabled))
return false;

@@ -573,6 +576,14 @@ void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
SetHPageVmemmapOptimized(head);
}

+void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
+{
+ struct folio *folio;
+
+ list_for_each_entry(folio, folio_list, lru)
+ hugetlb_vmemmap_optimize(h, &folio->page);
+}
+
static struct ctl_table hugetlb_vmemmap_sysctls[] = {
{
.procname = "hugetlb_optimize_vmemmap",
@@ -13,6 +13,7 @@
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
+void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);

/*
* Reserve one vmemmap page, all vmemmap addresses are mapped to it. See
@@ -47,6 +48,10 @@ static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page
{
}

+static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
+{
+}
+
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
return 0;
When adding hugetlb pages to the pool, we first create a list of the
allocated pages before adding them to the pool.  Pass this list of pages
to a new routine hugetlb_vmemmap_optimize_folios() for vmemmap
optimization.

We also modify the routine vmemmap_should_optimize() to check for pages
that are already optimized.  There are code paths that might request
vmemmap optimization twice, and we want to make sure this is not
attempted.

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 mm/hugetlb.c         |  5 +++++
 mm/hugetlb_vmemmap.c | 11 +++++++++++
 mm/hugetlb_vmemmap.h |  5 +++++
 3 files changed, 21 insertions(+)
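As an aside for readers outside the kernel tree, below is a minimal
userspace C sketch of the two ideas in this patch: one bulk pass over a
list of pages, guarded by a per-entry "already optimized" check so that
a second optimization request is a harmless no-op.  The structures and
names here (folio, hstate, optimize_folios(), ...) are simplified
stand-ins invented for illustration; they are not the kernel's types or
this patch's actual implementation.

/*
 * Illustrative userspace model of the bulk-optimize pattern above.
 * "folio" and "hstate" are simplified stand-ins for the kernel types;
 * the singly linked list stands in for the lru list linkage.
 */
#include <stdbool.h>
#include <stdio.h>

struct folio {
	bool vmemmap_optimized;		/* stand-in for HPageVmemmapOptimized() */
	struct folio *next;		/* stand-in for the lru list linkage */
};

struct hstate { const char *name; };

/*
 * Mirrors the new vmemmap_should_optimize() check: refuse to optimize
 * a folio twice, making the operation idempotent.
 */
static bool should_optimize(const struct folio *f)
{
	return !f->vmemmap_optimized;
}

static void optimize_one(const struct hstate *h, struct folio *f)
{
	if (!should_optimize(f))
		return;			/* second request is a no-op */
	/* ... vmemmap remapping would happen here ... */
	f->vmemmap_optimized = true;
	printf("%s: folio %p optimized\n", h->name, (void *)f);
}

/*
 * Mirrors hugetlb_vmemmap_optimize_folios(): one pass over the whole
 * allocation list before the folios enter the pool.
 */
static void optimize_folios(const struct hstate *h, struct folio *head)
{
	for (struct folio *f = head; f; f = f->next)
		optimize_one(h, f);
}

int main(void)
{
	struct folio b = { .vmemmap_optimized = false, .next = NULL };
	struct folio a = { .vmemmap_optimized = false, .next = &b };
	struct hstate h = { .name = "hugepages-2048kB" };

	optimize_folios(&h, &a);
	optimize_folios(&h, &a);	/* second call does nothing */
	return 0;
}

Running this prints one "optimized" line per folio on the first pass
and nothing on the second, which is the property the
vmemmap_should_optimize() change is meant to guarantee for code paths
that might request optimization twice.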