diff mbox series

[3/4] mm: mremap: use flush_pud_tlb_range in move_normal_pud()

Message ID 20230731074829.79309-4-wangkefeng.wang@huawei.com (mailing list archive)
State New
Headers show
Series mm: mremap: fix move page tables | expand

Commit Message

Kefeng Wang July 31, 2023, 7:48 a.m. UTC
Some architectures may need to do special things when flushing the TLB
for huge page mappings, so use the more applicable flush_pud_tlb_range()
instead of the generic flush_tlb_range().

Fixes: c49dd3401802 ("mm: speedup mremap on 1GB or larger regions")
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/mremap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

Comments

kernel test robot July 31, 2023, 4:42 p.m. UTC | #1
Hi Kefeng,

kernel test robot noticed the following build errors:

[auto build test ERROR on arm64/for-next/core]
[also build test ERROR on arm-perf/for-next/perf linus/master v6.5-rc4 next-20230731]
[cannot apply to akpm-mm/mm-everything]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Kefeng-Wang/mm-hugetlb-use-flush_hugetlb_tlb_range-in-move_hugetlb_page_tables/20230731-154016
base:   https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git for-next/core
patch link:    https://lore.kernel.org/r/20230731074829.79309-4-wangkefeng.wang%40huawei.com
patch subject: [PATCH 3/4] mm: mremap: use flush_pud_tlb_range in move_normal_pud()
config: riscv-allmodconfig (https://download.01.org/0day-ci/archive/20230801/202308010022.uY01vAew-lkp@intel.com/config)
compiler: riscv64-linux-gcc (GCC) 12.3.0
reproduce: (https://download.01.org/0day-ci/archive/20230801/202308010022.uY01vAew-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202308010022.uY01vAew-lkp@intel.com/

All errors (new ones prefixed by >>):

   mm/mremap.c: In function 'move_normal_pud':
>> mm/mremap.c:336:9: error: implicit declaration of function 'flush_pud_tlb_range'; did you mean 'flush_pmd_tlb_range'? [-Werror=implicit-function-declaration]
     336 |         flush_pud_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
         |         ^~~~~~~~~~~~~~~~~~~
         |         flush_pmd_tlb_range
   cc1: some warnings being treated as errors


vim +336 mm/mremap.c

   302	
   303	#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
   304	static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
   305			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
   306	{
   307		spinlock_t *old_ptl, *new_ptl;
   308		struct mm_struct *mm = vma->vm_mm;
   309		pud_t pud;
   310	
   311		if (!arch_supports_page_table_move())
   312			return false;
   313		/*
   314		 * The destination pud shouldn't be established, free_pgtables()
   315		 * should have released it.
   316		 */
   317		if (WARN_ON_ONCE(!pud_none(*new_pud)))
   318			return false;
   319	
   320		/*
   321		 * We don't have to worry about the ordering of src and dst
   322		 * ptlocks because exclusive mmap_lock prevents deadlock.
   323		 */
   324		old_ptl = pud_lock(vma->vm_mm, old_pud);
   325		new_ptl = pud_lockptr(mm, new_pud);
   326		if (new_ptl != old_ptl)
   327			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
   328	
   329		/* Clear the pud */
   330		pud = *old_pud;
   331		pud_clear(old_pud);
   332	
   333		VM_BUG_ON(!pud_none(*new_pud));
   334	
   335		pud_populate(mm, new_pud, pud_pgtable(pud));
 > 336		flush_pud_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
   337		if (new_ptl != old_ptl)
   338			spin_unlock(new_ptl);
   339		spin_unlock(old_ptl);
   340	
   341		return true;
   342	}
   343	#else
   344	static inline bool move_normal_pud(struct vm_area_struct *vma,
   345			unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
   346			pud_t *new_pud)
   347	{
   348		return false;
   349	}
   350	#endif
   351
diff mbox series

Patch

diff --git a/mm/mremap.c b/mm/mremap.c
index 1883205fa22b..25114e56901f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -333,7 +333,7 @@  static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
 	VM_BUG_ON(!pud_none(*new_pud));
 
 	pud_populate(mm, new_pud, pud_pgtable(pud));
-	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
+	flush_pud_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
 	if (new_ptl != old_ptl)
 		spin_unlock(new_ptl);
 	spin_unlock(old_ptl);