| Message ID | 11dbe3ac88130dbd2b8554f9369cd93fe138c655.1744037648.git.agordeev@linux.ibm.com |
|---|---|
| State | New |
| Series | mm: Fix apply_to_pte_range() vs lazy MMU mode |
On Tue Apr 8, 2025 at 1:11 AM AEST, Alexander Gordeev wrote:
> The lazy MMU mode can only be entered and left under the protection
> of the page table locks for all page tables which may be modified.
> Yet, when it comes to kernel mappings, apply_to_pte_range() does not
> take any locks. That does not conform to arch_enter|leave_lazy_mmu_mode()
> semantics and could potentially lead to re-scheduling a process while
> in lazy MMU mode or racing on kernel page table updates.
>
> Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
> ---
>  mm/kasan/shadow.c | 7 ++-----
>  mm/memory.c       | 5 ++++-
>  2 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
> index edfa77959474..6531a7aa8562 100644
> --- a/mm/kasan/shadow.c
> +++ b/mm/kasan/shadow.c
> @@ -308,14 +308,14 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
>  	__memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
>  	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
>
> -	spin_lock(&init_mm.page_table_lock);
>  	if (likely(pte_none(ptep_get(ptep)))) {
>  		set_pte_at(&init_mm, addr, ptep, pte);
>  		page = 0;
>  	}
> -	spin_unlock(&init_mm.page_table_lock);
> +
>  	if (page)
>  		free_page(page);
> +
>  	return 0;
>  }

kasan_populate_vmalloc_pte() is really the only thing that takes the
ptl in the apply_to_page_range fn()... Looks like you may be right. I
wonder why they do and nobody else? Just luck?

Seems okay.

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>

> @@ -401,13 +401,10 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
>
>  	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);
>
> -	spin_lock(&init_mm.page_table_lock);
> -
>  	if (likely(!pte_none(ptep_get(ptep)))) {
>  		pte_clear(&init_mm, addr, ptep);
>  		free_page(page);
>  	}
> -	spin_unlock(&init_mm.page_table_lock);
>
>  	return 0;
>  }
> diff --git a/mm/memory.c b/mm/memory.c
> index f0201c8ec1ce..1f3727104e99 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2926,6 +2926,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
>  		pte = pte_offset_kernel(pmd, addr);
>  		if (!pte)
>  			return err;
> +		spin_lock(&init_mm.page_table_lock);
>  	} else {
>  		if (create)
>  			pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
> @@ -2951,7 +2952,9 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
>
>  	arch_leave_lazy_mmu_mode();
>
> -	if (mm != &init_mm)
> +	if (mm == &init_mm)
> +		spin_unlock(&init_mm.page_table_lock);
> +	else
>  		pte_unmap_unlock(mapped_pte, ptl);
>
>  	*mask |= PGTBL_PTE_MODIFIED;
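Nicholas's observation above is the crux of the simplification: with the
locking moved into apply_to_pte_range(), an applied fn() on kernel
mappings may assume the ptl is already held. A minimal sketch of such a
callback (the name example_clear_pte and its use are hypothetical, not
part of the patch):

/*
 * Hypothetical apply_to_page_range() callback for a kernel mapping.
 * After this patch, apply_to_pte_range() holds init_mm.page_table_lock
 * (and has entered lazy MMU mode) around every fn() invocation, so the
 * callback takes no locks itself -- just like the kasan callbacks above.
 */
static int example_clear_pte(pte_t *ptep, unsigned long addr, void *unused)
{
	/* Caller holds init_mm.page_table_lock. */
	if (!pte_none(ptep_get(ptep)))
		pte_clear(&init_mm, addr, ptep);

	return 0;
}

It would be invoked through, e.g.,
apply_to_page_range(&init_mm, addr, size, example_clear_pte, NULL),
with all serialization handled by the core.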
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index edfa77959474..6531a7aa8562 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -308,14 +308,14 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	__memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
 	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

-	spin_lock(&init_mm.page_table_lock);
 	if (likely(pte_none(ptep_get(ptep)))) {
 		set_pte_at(&init_mm, addr, ptep, pte);
 		page = 0;
 	}
-	spin_unlock(&init_mm.page_table_lock);
+
 	if (page)
 		free_page(page);
+
 	return 0;
 }
@@ -401,13 +401,10 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,

 	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);

-	spin_lock(&init_mm.page_table_lock);
-
 	if (likely(!pte_none(ptep_get(ptep)))) {
 		pte_clear(&init_mm, addr, ptep);
 		free_page(page);
 	}
-	spin_unlock(&init_mm.page_table_lock);

 	return 0;
 }
diff --git a/mm/memory.c b/mm/memory.c
index f0201c8ec1ce..1f3727104e99 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2926,6 +2926,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		pte = pte_offset_kernel(pmd, addr);
 		if (!pte)
 			return err;
+		spin_lock(&init_mm.page_table_lock);
 	} else {
 		if (create)
 			pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
@@ -2951,7 +2952,9 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,

 	arch_leave_lazy_mmu_mode();

-	if (mm != &init_mm)
+	if (mm == &init_mm)
+		spin_unlock(&init_mm.page_table_lock);
+	else
 		pte_unmap_unlock(mapped_pte, ptl);

 	*mask |= PGTBL_PTE_MODIFIED;
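Condensed, the post-patch apply_to_pte_range() pairs the page table
lock with the lazy MMU section on both paths. A simplified sketch of
the resulting control flow (not verbatim kernel code; the create /
!create and pte_none() handling is omitted):

	if (mm == &init_mm) {
		pte = pte_offset_kernel(pmd, addr);
		spin_lock(&init_mm.page_table_lock);	/* added by this patch */
	} else {
		mapped_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	}

	arch_enter_lazy_mmu_mode();	/* now always entered under a lock */

	do {
		err = fn(pte++, addr, data);	/* callbacks run locked */
	} while (addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();

	if (mm == &init_mm)
		spin_unlock(&init_mm.page_table_lock);
	else
		pte_unmap_unlock(mapped_pte, ptl);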
The lazy MMU mode can only be entered and left under the protection of
the page table locks for all page tables which may be modified. Yet,
when it comes to kernel mappings, apply_to_pte_range() does not take
any locks. That does not conform to arch_enter|leave_lazy_mmu_mode()
semantics and could potentially lead to re-scheduling a process while
in lazy MMU mode or racing on kernel page table updates.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
---
 mm/kasan/shadow.c | 7 ++-----
 mm/memory.c       | 5 ++++-
 2 files changed, 6 insertions(+), 6 deletions(-)
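For reference, the semantics the commit message appeals to: in lazy MMU
mode an architecture may queue PTE updates and apply them in a single
batch on leave, so the whole section has to stay non-preemptible and
serialized against other updaters. A generic illustration of that
contract (not code from this patch):

	spin_lock(ptl);			/* serialize PTE updaters */
	arch_enter_lazy_mmu_mode();

	set_pte_at(mm, addr, ptep, pte);	/* may only be queued here... */

	arch_leave_lazy_mmu_mode();	/* ...and applied in one batch here */
	spin_unlock(ptl);

Taking init_mm.page_table_lock for kernel mappings gives
apply_to_pte_range() the same bracketing that pte_offset_map_lock() /
pte_unmap_unlock() already provides for user mappings.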