@@ -3059,7 +3059,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
unsigned int new_nr = 1 << new_order;
int order = folio_order(folio);
unsigned int nr = 1 << order;
+ struct folio_batch free_folios;

+ folio_batch_init(&free_folios);
/* complete memcg works before add pages to LRU */
split_page_memcg(head, order, new_order);

@@ -3143,6 +3145,29 @@ static void __split_huge_page(struct page *page, struct list_head *list,
if (subpage == page)
continue;
folio_unlock(new_folio);
+ /*
+ * If a folio has only two references left, one inherited
+ * from the isolation of its head and the other from
+ * lru_add_page_tail() which we are about to drop, it means this
+ * folio was concurrently zapped. Then we can safely free it
+ * and save page reclaim or migration the trouble of trying it.
+ */
+ if (list && folio_ref_freeze(new_folio, 2)) {
+ VM_WARN_ON_ONCE_FOLIO(folio_test_lru(new_folio), new_folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_large(new_folio), new_folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_mapped(new_folio), new_folio);
+
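+ /* PG_active and PG_unevictable must be clear at free: see PAGE_FLAGS_CHECK_AT_FREE. */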
+ folio_clear_active(new_folio);
+ folio_clear_unevictable(new_folio);
+ list_del(&new_folio->lru);
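+ /* folio_batch_add() returns the slots still free; 0 means the batch is full, so drain it. */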
+ if (!folio_batch_add(&free_folios, new_folio)) {
+ mem_cgroup_uncharge_folios(&free_folios);
+ free_unref_folios(&free_folios);
+ }
+ continue;
+ }

/*
* Subpages may be freed if there wasn't any mapping
@@ -3153,6 +3178,12 @@ static void __split_huge_page(struct page *page, struct list_head *list,
*/
free_page_and_swap_cache(subpage);
}
+
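+ /* Drain any folios still batched after the loop. */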
+ if (free_folios.nr) {
+ mem_cgroup_uncharge_folios(&free_folios);
+ free_unref_folios(&free_folios);
+ }
}

/* Racy check whether the huge page can be split */
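
Two building blocks make this free path safe and cheap. First, folio_ref_freeze(new_folio, 2) atomically drops the reference count from exactly 2 to 0 (a cmpxchg that fails if anyone else still holds a reference), so a successful freeze proves the folio was concurrently zapped and is ours to free. Second, struct folio_batch implements an accumulate-and-flush idiom: folio_batch_add() stores a folio and returns the number of slots still available (the batch holds PAGEVEC_SIZE, i.e. 31, entries), so a zero return tells the caller to drain the batch before continuing, with one final drain after the loop for the remainder. The userspace C sketch below illustrates only that idiom; struct batch, batch_add(), batch_flush() and BATCH_SIZE are hypothetical stand-ins for folio_batch, folio_batch_add() and the mem_cgroup_uncharge_folios()/free_unref_folios() pair, not kernel API.

/*
 * Minimal, self-contained illustration of the accumulate-and-flush
 * pattern used above. Illustration only: none of these names exist
 * in the kernel.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 31        /* same capacity as the kernel's PAGEVEC_SIZE */

struct batch {
        unsigned int nr;
        void *items[BATCH_SIZE];
};

/* Like folio_batch_add(): store an item, return the slots still free. */
static unsigned int batch_add(struct batch *b, void *item)
{
        b->items[b->nr++] = item;
        return BATCH_SIZE - b->nr;
}

/* Stand-in for the mem_cgroup_uncharge_folios() + free_unref_folios() pair. */
static void batch_flush(struct batch *b)
{
        for (unsigned int i = 0; i < b->nr; i++)
                free(b->items[i]);
        b->nr = 0;
}

int main(void)
{
        struct batch free_items = { .nr = 0 };

        /* Accumulate; drain whenever the batch fills, as the split loop does. */
        for (int i = 0; i < 100; i++) {
                if (!batch_add(&free_items, malloc(16)))
                        batch_flush(&free_items);
        }

        /* Final drain for a partially filled batch, as after the loop above. */
        if (free_items.nr)
                batch_flush(&free_items);

        printf("done\n");
        return 0;
}

Draining inside the loop bounds how many freed-but-unreleased folios the batch can pin at once, while doing the uncharge and free in bulk amortizes the per-folio locking that freeing each tail folio individually would pay.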