@@ -2891,7 +2891,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
{
struct folio *folio = page_folio(page);
struct page *head = &folio->page;
- struct lruvec *lruvec;
+ struct lruvec *lruvec = folio_lruvec(folio);
struct address_space *swap_cache = NULL;
unsigned long offset = 0;
int i, nr_dropped = 0;
@@ -2908,8 +2908,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
xa_lock(&swap_cache->i_pages);
}
- /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
- lruvec = folio_lruvec_lock(folio);
ClearPageHasHWPoisoned(head);
@@ -2942,7 +2940,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
folio_set_order(new_folio, new_order);
}
- unlock_page_lruvec(lruvec);
/* Caller disabled irqs, so they are still disabled here */
split_page_owner(head, order, new_order);
@@ -2961,7 +2958,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
folio_ref_add(folio, 1 + new_nr);
xa_unlock(&folio->mapping->i_pages);
}
- local_irq_enable();
if (nr_dropped)
shmem_uncharge(folio->mapping->host, nr_dropped);
@@ -3048,6 +3044,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
int extra_pins, ret;
pgoff_t end;
bool is_hzp;
+ struct lruvec *lruvec;
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
@@ -3159,6 +3156,14 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
/* block interrupt reentry in xa_lock and spinlock */
local_irq_disable();
+
+ /*
+ * Take the lruvec's lock before freezing the folio, so the folio
+ * does not remain in the page cache with a refcount of 0, which could
+ * cause find_get_entry() to livelock while iterating the xarray.
+ */
+ lruvec = folio_lruvec_lock(folio);
+
if (mapping) {
/*
* Check if the folio is present in page cache.
@@ -3203,12 +3208,16 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
}
__split_huge_page(page, list, end, new_order);
+ unlock_page_lruvec(lruvec);
+ local_irq_enable();
ret = 0;
} else {
spin_unlock(&ds_queue->split_queue_lock);
fail:
if (mapping)
xas_unlock(&xas);
+
+ unlock_page_lruvec(lruvec);
local_irq_enable();
remap_page(folio, folio_nr_pages(folio));
ret = -EAGAIN;