[v2,02/10] mm/thp: make is_huge_zero_pmd() safe and quicker

Message ID 21ea9ca-a1f5-8b90-5e88-95fb1c49bbfa@google.com (mailing list archive)
State New, archived
Series mm/thp: fix THP splitting unmap BUGs and related (fwd)

Commit Message

Hugh Dickins June 9, 2021, 4:08 a.m. UTC
Most callers of is_huge_zero_pmd() supply a pmd already verified present;
but a few (notably zap_huge_pmd()) do not - it might be a pmd migration
entry, in which the pfn is encoded differently from a present pmd: which
might pass the is_huge_zero_pmd() test (though not on x86, since L1TF
forced us to protect against that); or perhaps even crash in pmd_page()
applied to a swap-like entry.
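
To make the hazard concrete, here is the check before and after, lifted
from the huge_mm.h hunk below and renamed so the two can sit side by
side (illustration only, not kernel source):

/* Before: pmd_page() translates the pfn into a struct page.  On a pmd
 * migration entry the pfn bits are swap-encoded, so the lookup could
 * match the zero page by accident - or crash, as noted above. */
static inline bool is_huge_zero_pmd_old(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

/* After: compare against the cached pfn (no pmd_page() lookup at all),
 * and require pmd_present() so a swap-encoded pfn can never match. */
static inline bool is_huge_zero_pmd_new(pmd_t pmd)
{
	return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}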

Make it safe by adding pmd_present() check into is_huge_zero_pmd() itself;
and make it quicker by saving huge_zero_pfn, so that is_huge_zero_pmd()
will not need to do that pmd_page() lookup each time.
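
The cached pfn's lifecycle tracks the zero page's own (see the
mm/huge_memory.c hunks below): ~0UL while no huge zero page exists,
published once the page is allocated, and invalidated before the
shrinker frees it.  Condensed from the patch:

unsigned long huge_zero_pfn __read_mostly = ~0UL;	/* no zero page yet */

/* get_huge_zero_page(): publish the pfn once the page is in place */
WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

/* shrink_huge_zero_page_scan(): invalidate before freeing the page */
WRITE_ONCE(huge_zero_pfn, ~0UL);
__free_pages(zero_page, compound_order(zero_page));

READ_ONCE()/WRITE_ONCE() keep the unlocked readers coherent with
allocation and teardown, and ~0UL serves as a sentinel pfn that no
present pmd should ever map.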

__split_huge_pmd_locked() checked pmd_trans_huge() before: that worked,
but is unnecessary now that is_huge_zero_pmd() checks present.
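
At the call site, that lets the final mm/huge_memory.c hunk drop the
extra test:

-	if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+	if (is_huge_zero_pmd(*pmd)) {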

Fixes: e71769ae5260 ("mm: enable thp migration for shmem thp")
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: <stable@vger.kernel.org>
---
Patch added (replacing part of first) since the v1 series was posted.

 include/linux/huge_mm.h | 8 +++++++-
 mm/huge_memory.c        | 5 ++++-
 2 files changed, 11 insertions(+), 2 deletions(-)

Comments

Kirill A. Shutemov June 9, 2021, 10:22 a.m. UTC | #1
On Tue, Jun 08, 2021 at 09:08:09PM -0700, Hugh Dickins wrote:
> Most callers of is_huge_zero_pmd() supply a pmd already verified present;
> but a few (notably zap_huge_pmd()) do not - it might be a pmd migration
> entry, in which the pfn is encoded differently from a present pmd: which
> might pass the is_huge_zero_pmd() test (though not on x86, since L1TF
> forced us to protect against that); or perhaps even crash in pmd_page()
> applied to a swap-like entry.
> 
> Make it safe by adding pmd_present() check into is_huge_zero_pmd() itself;
> and make it quicker by saving huge_zero_pfn, so that is_huge_zero_pmd()
> will not need to do that pmd_page() lookup each time.
> 
> __split_huge_pmd_locked() checked pmd_trans_huge() before: that worked,
> but is unnecessary now that is_huge_zero_pmd() checks present.
> 
> Fixes: e71769ae5260 ("mm: enable thp migration for shmem thp")
> Signed-off-by: Hugh Dickins <hughd@google.com>
> Cc: <stable@vger.kernel.org>

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Yang Shi June 9, 2021, 4:56 p.m. UTC | #2
On Tue, Jun 8, 2021 at 9:08 PM Hugh Dickins <hughd@google.com> wrote:
>
> Most callers of is_huge_zero_pmd() supply a pmd already verified present;
> but a few (notably zap_huge_pmd()) do not - it might be a pmd migration
> entry, in which the pfn is encoded differently from a present pmd: which
> might pass the is_huge_zero_pmd() test (though not on x86, since L1TF
> forced us to protect against that); or perhaps even crash in pmd_page()
> applied to a swap-like entry.
>
> Make it safe by adding pmd_present() check into is_huge_zero_pmd() itself;
> and make it quicker by saving huge_zero_pfn, so that is_huge_zero_pmd()
> will not need to do that pmd_page() lookup each time.
>
> __split_huge_pmd_locked() checked pmd_trans_huge() before: that worked,
> but is unnecessary now that is_huge_zero_pmd() checks present.
>
> Fixes: e71769ae5260 ("mm: enable thp migration for shmem thp")
> Signed-off-by: Hugh Dickins <hughd@google.com>
> Cc: <stable@vger.kernel.org>

Reviewed-by: Yang Shi <shy828301@gmail.com>

> ---
> Patch added (replacing part of first) since the v1 series was posted.
>
>  include/linux/huge_mm.h | 8 +++++++-
>  mm/huge_memory.c        | 5 ++++-
>  2 files changed, 11 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 9626fda5efce..2a8ebe6c222e 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -286,6 +286,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
>  vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
>
>  extern struct page *huge_zero_page;
> +extern unsigned long huge_zero_pfn;
>
>  static inline bool is_huge_zero_page(struct page *page)
>  {
> @@ -294,7 +295,7 @@ static inline bool is_huge_zero_page(struct page *page)
>
>  static inline bool is_huge_zero_pmd(pmd_t pmd)
>  {
> -       return is_huge_zero_page(pmd_page(pmd));
> +       return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
>  }
>
>  static inline bool is_huge_zero_pud(pud_t pud)
> @@ -440,6 +441,11 @@ static inline bool is_huge_zero_page(struct page *page)
>         return false;
>  }
>
> +static inline bool is_huge_zero_pmd(pmd_t pmd)
> +{
> +       return false;
> +}
> +
>  static inline bool is_huge_zero_pud(pud_t pud)
>  {
>         return false;
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 42cfefc6e66e..5885c5f5836f 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -62,6 +62,7 @@ static struct shrinker deferred_split_shrinker;
>
>  static atomic_t huge_zero_refcount;
>  struct page *huge_zero_page __read_mostly;
> +unsigned long huge_zero_pfn __read_mostly = ~0UL;
>
>  bool transparent_hugepage_enabled(struct vm_area_struct *vma)
>  {
> @@ -98,6 +99,7 @@ static bool get_huge_zero_page(void)
>                 __free_pages(zero_page, compound_order(zero_page));
>                 goto retry;
>         }
> +       WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
>
>         /* We take additional reference here. It will be put back by shrinker */
>         atomic_set(&huge_zero_refcount, 2);
> @@ -147,6 +149,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
>         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
>                 struct page *zero_page = xchg(&huge_zero_page, NULL);
>                 BUG_ON(zero_page == NULL);
> +               WRITE_ONCE(huge_zero_pfn, ~0UL);
>                 __free_pages(zero_page, compound_order(zero_page));
>                 return HPAGE_PMD_NR;
>         }
> @@ -2071,7 +2074,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>                 return;
>         }
>
> -       if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
> +       if (is_huge_zero_pmd(*pmd)) {
>                 /*
>                  * FIXME: Do we want to invalidate secondary mmu by calling
>                  * mmu_notifier_invalidate_range() see comments below inside
> --
> 2.26.2
>

Patch

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9626fda5efce..2a8ebe6c222e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -286,6 +286,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
 
 extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;
 
 static inline bool is_huge_zero_page(struct page *page)
 {
@@ -294,7 +295,7 @@ static inline bool is_huge_zero_page(struct page *page)
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
-	return is_huge_zero_page(pmd_page(pmd));
+	return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
 }
 
 static inline bool is_huge_zero_pud(pud_t pud)
@@ -440,6 +441,11 @@ static inline bool is_huge_zero_page(struct page *page)
 	return false;
 }
 
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+	return false;
+}
+
 static inline bool is_huge_zero_pud(pud_t pud)
 {
 	return false;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 42cfefc6e66e..5885c5f5836f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -62,6 +62,7 @@ static struct shrinker deferred_split_shrinker;
 
 static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
+unsigned long huge_zero_pfn __read_mostly = ~0UL;
 
 bool transparent_hugepage_enabled(struct vm_area_struct *vma)
 {
@@ -98,6 +99,7 @@ static bool get_huge_zero_page(void)
 		__free_pages(zero_page, compound_order(zero_page));
 		goto retry;
 	}
+	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
 
 	/* We take additional reference here. It will be put back by shrinker */
 	atomic_set(&huge_zero_refcount, 2);
@@ -147,6 +149,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
 		struct page *zero_page = xchg(&huge_zero_page, NULL);
 		BUG_ON(zero_page == NULL);
+		WRITE_ONCE(huge_zero_pfn, ~0UL);
 		__free_pages(zero_page, compound_order(zero_page));
 		return HPAGE_PMD_NR;
 	}
@@ -2071,7 +2074,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		return;
 	}
 
-	if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+	if (is_huge_zero_pmd(*pmd)) {
 		/*
 		 * FIXME: Do we want to invalidate secondary mmu by calling
 		 * mmu_notifier_invalidate_range() see comments below inside