Replace the existing head-page logic with folio logic.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memory-failure.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

@@ -2645,40 +2645,40 @@ static int soft_offline_in_use_page(struct page *page)
{
long ret = 0;
unsigned long pfn = page_to_pfn(page);
- struct page *hpage = compound_head(page);
+ struct folio *folio = page_folio(page);
char const *msg_page[] = {"page", "hugepage"};
- bool huge = PageHuge(page);
+ bool huge = folio_test_hugetlb(folio);
LIST_HEAD(pagelist);
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
};

- if (!huge && PageTransHuge(hpage)) {
+ if (!huge && folio_test_large(folio)) {
if (try_to_split_thp_page(page)) {
pr_info("soft offline: %#lx: thp split failed\n", pfn);
return -EBUSY;
}
- hpage = page;
+ folio = page_folio(page);
}

- lock_page(page);
+ folio_lock(folio);
if (!huge)
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
if (PageHWPoison(page)) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
pr_info("soft offline: %#lx page already poisoned\n", pfn);
return 0;
}

- if (!huge && PageLRU(page) && !PageSwapCache(page))
+ if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
/*
* Try to invalidate first. This should work for
* non dirty unmapped page cache pages.
*/
- ret = invalidate_inode_page(page);
- unlock_page(page);
+ ret = mapping_evict_folio(folio_mapping(folio), folio);
+ folio_unlock(folio);

if (ret) {
pr_info("soft_offline: %#lx: invalidated\n", pfn);
@@ -2686,7 +2686,7 @@ static int soft_offline_in_use_page(struct page *page)
return 0;
}

- if (isolate_page(hpage, &pagelist)) {
+ if (isolate_page(&folio->page, &pagelist)) {
ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
if (!ret) {
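
A short, illustrative sketch of the conversion pattern (not part of the
patch): it contrasts the head-page idiom removed above with the folio
idiom that replaces it. The compound_head()/page_folio() calls and the
page/folio flag helpers are real in-tree APIs, but demo_old() and
demo_new() are invented names, and the snippet only builds in-tree.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Old idiom: resolve the head page by hand, then mix page-based and
 * head-page-based helpers.
 */
static bool demo_old(struct page *page)
{
	struct page *hpage = compound_head(page);
	bool evictable;

	lock_page(page);		/* the lock lives on the head page */
	evictable = !PageHuge(page) && PageLRU(page) &&
		    !PageSwapCache(page) && !PageTransHuge(hpage);
	unlock_page(page);
	put_page(page);
	return evictable;
}

/* New idiom: resolve the folio once with page_folio(); every later
 * call names the folio explicitly.
 */
static bool demo_new(struct page *page)
{
	struct folio *folio = page_folio(page);
	bool evictable;

	folio_lock(folio);
	evictable = !folio_test_hugetlb(folio) && folio_test_lru(folio) &&
		    !folio_test_swapcache(folio) && !folio_test_large(folio);
	folio_unlock(folio);
	folio_put(folio);
	return evictable;
}

One detail worth noting in the first hunk: after a successful
try_to_split_thp_page(), the old large folio no longer exists and @page
heads its own order-0 folio, so the folio pointer is re-fetched with
page_folio(page), mirroring the old "hpage = page;" assignment.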