
[v2,08/11] mm/memory-failure: Convert hwpoison_user_mappings to take a folio

Message ID 20240408194232.118537-9-willy@infradead.org
State New
Series Some cleanups for memory-failure

Commit Message

Matthew Wilcox April 8, 2024, 7:42 p.m. UTC
Pass the folio from the callers, and use it throughout instead of hpage.
Saves dozens of calls to compound_head().

Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memory-failure.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

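The saving is mechanical: legacy page-flag helpers such as PageDirty(page) resolve the head page on every call, while the folio helpers test folio->flags directly, since a folio is never a tail page. A simplified sketch of the two patterns (illustrative names; the real helpers, PageDirty() and folio_test_dirty(), are macro-generated in include/linux/page-flags.h):

	/* Legacy helper: every test pays for a compound_head() lookup. */
	static inline bool page_dirty(struct page *page)
	{
		return test_bit(PG_dirty, &compound_head(page)->flags);
	}

	/* Folio helper: a folio is never a tail page, so the flags
	 * word can be tested directly, with no lookup.
	 */
	static inline bool folio_dirty(struct folio *folio)
	{
		return test_bit(PG_dirty, &folio->flags);
	}
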
Comments

Jane Chu April 9, 2024, 6:15 a.m. UTC | #1
On 4/8/2024 12:42 PM, Matthew Wilcox (Oracle) wrote:

> Pass the folio from the callers, and use it throughout instead of hpage.
> Saves dozens of calls to compound_head().
>
> Acked-by: Miaohe Lin <linmiaohe@huawei.com>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> [...]

Looks good to me.

Reviewed-by: Jane Chu <jane.chu@oracle.com>

-jane

Patch

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1c7c73776604..fae0b42f0aaf 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1543,24 +1543,24 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
  */
-static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
-				  int flags, struct page *hpage)
+static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
+		unsigned long pfn, int flags)
 {
-	struct folio *folio = page_folio(hpage);
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
 	bool unmap_success;
 	int forcekill;
-	bool mlocked = PageMlocked(hpage);
+	bool mlocked = folio_test_mlocked(folio);
 
 	/*
 	 * Here we are interested only in user-mapped pages, so skip any
 	 * other types of pages.
 	 */
-	if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
+	if (folio_test_reserved(folio) || folio_test_slab(folio) ||
+	    folio_test_pgtable(folio) || folio_test_offline(folio))
 		return true;
-	if (!(PageLRU(hpage) || PageHuge(p)))
+	if (!(folio_test_lru(folio) || folio_test_hugetlb(folio)))
 		return true;
 
 	/*
@@ -1570,7 +1570,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (!page_mapped(p))
 		return true;
 
-	if (PageSwapCache(p)) {
+	if (folio_test_swapcache(folio)) {
 		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
 		ttu &= ~TTU_HWPOISON;
 	}
@@ -1581,11 +1581,11 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * XXX: the dirty test could be racy: set_page_dirty() may not always
 	 * be called inside page lock (it's recommended but not enforced).
 	 */
-	mapping = page_mapping(hpage);
-	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
+	mapping = folio_mapping(folio);
+	if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping &&
 	    mapping_can_writeback(mapping)) {
-		if (page_mkclean(hpage)) {
-			SetPageDirty(hpage);
+		if (folio_mkclean(folio)) {
+			folio_set_dirty(folio);
 		} else {
 			ttu &= ~TTU_HWPOISON;
 			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
@@ -1600,7 +1600,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 */
 	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
 
-	if (PageHuge(hpage) && !PageAnon(hpage)) {
+	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
 		/*
 		 * For hugetlb pages in shared mappings, try_to_unmap
 		 * could potentially call huge_pmd_unshare.  Because of
@@ -1640,7 +1640,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * use a more force-full uncatchable kill to prevent
 	 * any accesses to the poisoned memory.
 	 */
-	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
+	forcekill = folio_test_dirty(folio) || (flags & MF_MUST_KILL) ||
 		    !unmap_success;
 	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
 
@@ -2084,7 +2084,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb
 
 	page_flags = folio->flags;
 
-	if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
+	if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
 		folio_unlock(folio);
 		return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
 	}
@@ -2351,7 +2351,7 @@ int memory_failure(unsigned long pfn, int flags)
 	 * Now take care of user space mappings.
 	 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
 	 */
-	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
+	if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
 		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
 		goto unlock_page;
 	}
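
With page_folio() hoisted out of hwpoison_user_mappings(), each caller passes the folio it already holds. In memory_failure() that resolution happens once, earlier in the function; the call-site shape is roughly (a sketch, not a verbatim excerpt):

	struct folio *folio = page_folio(p);	/* resolve the head page once */

	/* ... take the folio lock, snapshot page_flags, etc. ... */

	if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		goto unlock_page;
	}

try_memory_failure_hugetlb() benefits the same way: it no longer converts its folio back with &folio->page only for the callee to immediately reconstruct the folio it started from.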