
[v3,4/4] mm/page_table_check: check entries at pmd levels

Message ID: 20220126060514.1574935-5-pasha.tatashin@soleen.com (mailing list archive)
State: New
Series: page table check fixes and cleanups

Commit Message

Pasha Tatashin Jan. 26, 2022, 6:05 a.m. UTC
syzbot detected a case where the page table counters were not properly
updated.

syzkaller login:  ------------[ cut here ]------------
kernel BUG at mm/page_table_check.c:162!
invalid opcode: 0000 [#1] PREEMPT SMP KASAN
CPU: 0 PID: 3099 Comm: pasha Not tainted 5.16.0+ #48
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIO4
RIP: 0010:__page_table_check_zero+0x159/0x1a0
Code: 7d 3a b2 ff 45 39 f5 74 2a e8 43 38 b2 ff 4d 85 e4 01
RSP: 0018:ffff888010667418 EFLAGS: 00010293
RAX: 0000000000000000 RBX: 0000000000000001 RCX: 0000000000
RDX: ffff88800cea8680 RSI: ffffffff81becaf9 RDI: 0000000003
RBP: ffff888010667450 R08: 0000000000000001 R09: 0000000000
R10: ffffffff81becaab R11: 0000000000000001 R12: ffff888008
R13: 0000000000000001 R14: 0000000000000200 R15: dffffc0000
FS:  0000000000000000(0000) GS:ffff888035e00000(0000) knlG0
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007ffd875cad00 CR3: 00000000094ce000 CR4: 0000000000
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000
Call Trace:
 <TASK>
 free_pcp_prepare+0x3be/0xaa0
 free_unref_page+0x1c/0x650
 ? trace_hardirqs_on+0x6a/0x1d0
 free_compound_page+0xec/0x130
 free_transhuge_page+0x1be/0x260
 __put_compound_page+0x90/0xd0
 release_pages+0x54c/0x1060
 ? filemap_remove_folio+0x161/0x210
 ? lock_downgrade+0x720/0x720
 ? __put_page+0x150/0x150
 ? filemap_free_folio+0x164/0x350
 __pagevec_release+0x7c/0x110
 shmem_undo_range+0x85e/0x1250
...

The reproducer involved a huge page that was split when a uprobe event
temporarily replaced one of the pages in the huge page. Later the huge
page was collapsed again, but the counters were off because the entries
at the PTE level were not properly accounted.

Make sure that when the PMD is cleared, and prior to freeing the PTE
level, the counters for the PTE entries are updated.

Fixes: df4e817b7108 ("mm: page table check")

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
---
 include/linux/page_table_check.h | 18 ++++++++++++++++++
 mm/khugepaged.c                  |  2 ++
 mm/page_table_check.c            | 21 +++++++++++++++++++++
 3 files changed, 41 insertions(+)
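
For context on the BUG line in the trace: page_table_check keeps per-page
counters of how many page table entries map each user page, and verifies
that they have dropped back to zero by the time the page is freed. Roughly,
__page_table_check_zero() (the function at mm/page_table_check.c:162 in the
trace) looks like the sketch below; the helper and field names
(get_page_table_check(), anon_map_count, file_map_count) are quoted from
memory and may not match the tree exactly:

void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	unsigned long i;

	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		/* A non-zero count means some PTE/PMD still maps this page. */
		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
}

In the reproducer, collapse_and_free_pmd() cleared the PMD and freed the
PTE page without going through the page_table_check clear hooks, so the
counters for the pages still accounted at the PTE level never dropped to
zero, and the check above fired when the huge page was eventually freed.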

Comments

David Rientjes Jan. 26, 2022, 6:45 a.m. UTC | #1
On Wed, 26 Jan 2022, Pasha Tatashin wrote:

> syzbot detected a case where the page table counters were not properly
> updated.
> 

Is there a Reported-by tag that syzbot wants us to use to track this?

> syzkaller login:  ------------[ cut here ]------------
> kernel BUG at mm/page_table_check.c:162!
> invalid opcode: 0000 [#1] PREEMPT SMP KASAN
> CPU: 0 PID: 3099 Comm: pasha Not tainted 5.16.0+ #48
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIO4
> RIP: 0010:__page_table_check_zero+0x159/0x1a0
> Code: 7d 3a b2 ff 45 39 f5 74 2a e8 43 38 b2 ff 4d 85 e4 01
> RSP: 0018:ffff888010667418 EFLAGS: 00010293
> RAX: 0000000000000000 RBX: 0000000000000001 RCX: 0000000000
> RDX: ffff88800cea8680 RSI: ffffffff81becaf9 RDI: 0000000003
> RBP: ffff888010667450 R08: 0000000000000001 R09: 0000000000
> R10: ffffffff81becaab R11: 0000000000000001 R12: ffff888008
> R13: 0000000000000001 R14: 0000000000000200 R15: dffffc0000
> FS:  0000000000000000(0000) GS:ffff888035e00000(0000) knlG0
> CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> CR2: 00007ffd875cad00 CR3: 00000000094ce000 CR4: 0000000000
> DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000
> DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000
> Call Trace:
>  <TASK>
>  free_pcp_prepare+0x3be/0xaa0
>  free_unref_page+0x1c/0x650
>  ? trace_hardirqs_on+0x6a/0x1d0
>  free_compound_page+0xec/0x130
>  free_transhuge_page+0x1be/0x260
>  __put_compound_page+0x90/0xd0
>  release_pages+0x54c/0x1060
>  ? filemap_remove_folio+0x161/0x210
>  ? lock_downgrade+0x720/0x720
>  ? __put_page+0x150/0x150
>  ? filemap_free_folio+0x164/0x350
>  __pagevec_release+0x7c/0x110
>  shmem_undo_range+0x85e/0x1250
> ...
> 
> The reproducer involved a huge page that was split when a uprobe event
> temporarily replaced one of the pages in the huge page. Later the huge
> page was collapsed again, but the counters were off because the entries
> at the PTE level were not properly accounted.
> 
> Make sure that when the PMD is cleared, and prior to freeing the PTE
> level, the counters for the PTE entries are updated.
> 
> Fixes: df4e817b7108 ("mm: page table check")
> 
> Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> ---
>  include/linux/page_table_check.h | 18 ++++++++++++++++++
>  mm/khugepaged.c                  |  2 ++
>  mm/page_table_check.c            | 21 +++++++++++++++++++++
>  3 files changed, 41 insertions(+)
> 
> diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
> index 38cace1da7b6..e88bbe37727b 100644
> --- a/include/linux/page_table_check.h
> +++ b/include/linux/page_table_check.h
> @@ -26,6 +26,8 @@ void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
>  				pmd_t *pmdp, pmd_t pmd);
>  void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
>  				pud_t *pudp, pud_t pud);
> +void __page_table_check_pmd_clear_full(struct mm_struct *mm, unsigned long addr,
> +				       pmd_t pmd);
>  
>  static inline void page_table_check_alloc(struct page *page, unsigned int order)
>  {
> @@ -100,6 +102,16 @@ static inline void page_table_check_pud_set(struct mm_struct *mm,
>  	__page_table_check_pud_set(mm, addr, pudp, pud);
>  }
>  
> +static inline void page_table_check_pmd_clear_full(struct mm_struct *mm,
> +						   unsigned long addr,
> +						   pmd_t pmd)
> +{
> +	if (static_branch_likely(&page_table_check_disabled))
> +		return;
> +
> +	__page_table_check_pmd_clear_full(mm, addr, pmd);
> +}
> +
>  #else
>  
>  static inline void page_table_check_alloc(struct page *page, unsigned int order)
> @@ -143,5 +155,11 @@ static inline void page_table_check_pud_set(struct mm_struct *mm,
>  {
>  }
>  
> +static inline void page_table_check_pmd_clear_full(struct mm_struct *mm,
> +						   unsigned long addr,
> +						   pmd_t pmd)
> +{
> +}
> +
>  #endif /* CONFIG_PAGE_TABLE_CHECK */
>  #endif /* __LINUX_PAGE_TABLE_CHECK_H */
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 440112355ffe..eefe3706f6c2 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -16,6 +16,7 @@
>  #include <linux/hashtable.h>
>  #include <linux/userfaultfd_k.h>
>  #include <linux/page_idle.h>
> +#include <linux/page_table_check.h>
>  #include <linux/swapops.h>
>  #include <linux/shmem_fs.h>
>  
> @@ -1424,6 +1425,7 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v
>  
>  	spin_unlock(ptl);
>  	mm_dec_nr_ptes(mm);
> +	page_table_check_pmd_clear_full(mm, addr, pmd);
>  	pte_free(mm, pmd_pgtable(pmd));
>  }

This looks right. I'm wondering if we want to add an
mmap_assert_write_locked(mm) call to collapse_and_free_pmd().
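
The suggested assertion would be a one-liner, presumably near the top of
collapse_and_free_pmd(), something like the sketch below (not part of the
posted patch; mmap_assert_write_locked() is the existing helper from
include/linux/mmap_lock.h):

	/*
	 * Sketch of the suggestion: document and enforce that callers hold
	 * mmap_lock for write before the PMD is cleared and the PTE page is
	 * freed.
	 */
	mmap_assert_write_locked(mm);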

>  
> diff --git a/mm/page_table_check.c b/mm/page_table_check.c
> index c61d7ebe13b1..251f95a808b4 100644
> --- a/mm/page_table_check.c
> +++ b/mm/page_table_check.c
> @@ -247,3 +247,24 @@ void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
>  	}
>  }
>  EXPORT_SYMBOL(__page_table_check_pud_set);
> +
> +void __page_table_check_pmd_clear_full(struct mm_struct *mm, unsigned long addr,
> +				       pmd_t pmd)
> +{
> +	if (&init_mm == mm)
> +		return;
> +
> +	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
> +		pte_t *ptep = pte_offset_map(&pmd, addr);
> +		unsigned long i;
> +
> +		pte_unmap(ptep);
> +		for (i = 0; i < PTRS_PER_PTE; i++) {
> +			__page_table_check_pte_clear(mm, addr, *ptep);
> +			addr += PAGE_SIZE;
> +			ptep++;
> +		}
> +	} else {
> +		__page_table_check_pmd_clear(mm, addr, pmd);
> +	}
> +}
> -- 
> 2.35.0.rc0.227.g00780c9af4-goog
> 
>
Pasha Tatashin Jan. 26, 2022, 12:46 p.m. UTC | #2
On Wed, Jan 26, 2022 at 1:45 AM David Rientjes <rientjes@google.com> wrote:
>
> On Wed, 26 Jan 2022, Pasha Tatashin wrote:
>
> > syzbot detected a case where the page table counters were not properly
> > updated.
> >
>
> Is there a Reported-by tag that syzbot wants us to use to track this?

A Google-internal syzbot instance found it, so I did not add a Reported-by
tag as there is no public e-mail to reference. However, I added all the
steps necessary to reproduce this problem to the cover letter.

>
>
> This looks right. I'm wondering if we want to add an
> mmap_assert_write_locked(mm) call to collapse_and_free_pmd().

Good idea, I will add it in the next revision.

Thanks,
Pasha

Patch

diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 38cace1da7b6..e88bbe37727b 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -26,6 +26,8 @@  void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
 				pmd_t *pmdp, pmd_t pmd);
 void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
 				pud_t *pudp, pud_t pud);
+void __page_table_check_pmd_clear_full(struct mm_struct *mm, unsigned long addr,
+				       pmd_t pmd);
 
 static inline void page_table_check_alloc(struct page *page, unsigned int order)
 {
@@ -100,6 +102,16 @@  static inline void page_table_check_pud_set(struct mm_struct *mm,
 	__page_table_check_pud_set(mm, addr, pudp, pud);
 }
 
+static inline void page_table_check_pmd_clear_full(struct mm_struct *mm,
+						   unsigned long addr,
+						   pmd_t pmd)
+{
+	if (static_branch_likely(&page_table_check_disabled))
+		return;
+
+	__page_table_check_pmd_clear_full(mm, addr, pmd);
+}
+
 #else
 
 static inline void page_table_check_alloc(struct page *page, unsigned int order)
@@ -143,5 +155,11 @@  static inline void page_table_check_pud_set(struct mm_struct *mm,
 {
 }
 
+static inline void page_table_check_pmd_clear_full(struct mm_struct *mm,
+						   unsigned long addr,
+						   pmd_t pmd)
+{
+}
+
 #endif /* CONFIG_PAGE_TABLE_CHECK */
 #endif /* __LINUX_PAGE_TABLE_CHECK_H */
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 440112355ffe..eefe3706f6c2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -16,6 +16,7 @@ 
 #include <linux/hashtable.h>
 #include <linux/userfaultfd_k.h>
 #include <linux/page_idle.h>
+#include <linux/page_table_check.h>
 #include <linux/swapops.h>
 #include <linux/shmem_fs.h>
 
@@ -1424,6 +1425,7 @@  static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v
 
 	spin_unlock(ptl);
 	mm_dec_nr_ptes(mm);
+	page_table_check_pmd_clear_full(mm, addr, pmd);
 	pte_free(mm, pmd_pgtable(pmd));
 }
 
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index c61d7ebe13b1..251f95a808b4 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -247,3 +247,24 @@  void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
 	}
 }
 EXPORT_SYMBOL(__page_table_check_pud_set);
+
+void __page_table_check_pmd_clear_full(struct mm_struct *mm, unsigned long addr,
+				       pmd_t pmd)
+{
+	if (&init_mm == mm)
+		return;
+
+	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
+		pte_t *ptep = pte_offset_map(&pmd, addr);
+		unsigned long i;
+
+		pte_unmap(ptep);
+		for (i = 0; i < PTRS_PER_PTE; i++) {
+			__page_table_check_pte_clear(mm, addr, *ptep);
+			addr += PAGE_SIZE;
+			ptep++;
+		}
+	} else {
+		__page_table_check_pmd_clear(mm, addr, pmd);
+	}
+}