@@ -474,6 +474,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
struct page *old_page, *new_page;
struct vm_area_struct *vma;
int ret, is_register, ref_ctr_updated = 0;
+ struct page *orig_page = NULL;
is_register = is_swbp_insn(&opcode);
uprobe = container_of(auprobe, struct uprobe, arch);
@@ -512,7 +513,6 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
if (!is_register) {
- struct page *orig_page;
pgoff_t index;
index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
@@ -540,6 +540,10 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
if (ret && is_register && ref_ctr_updated)
update_ref_ctr(uprobe, mm, -1);
+ if (!ret && orig_page && PageTransCompound(orig_page))
+ set_bit(AS_COLLAPSE_PMD,
+ &compound_head(orig_page)->mapping->flags);
+
return ret;
}
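[Note: the following is an illustrative sketch, not part of the patch.] The hunks above keep orig_page at function scope so that, once the breakpoint has been removed successfully, uprobe_write_opcode() can check whether the original file page belongs to a THP and flag its mapping for pmd collapse. The same idea, factored into a standalone helper for clarity; the helper name is hypothetical, and AS_COLLAPSE_PMD is the new address_space flag introduced by this series:

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper, shown only to illustrate the hunk above: if the
 * original file page behind the probed address is part of a compound
 * (pte-mapped THP) page, mark its address_space so khugepaged will
 * retract the page table and re-install a huge pmd.
 */
static void mark_mapping_for_pmd_collapse(struct page *orig_page)
{
	if (!orig_page || !PageTransCompound(orig_page))
		return;		/* not backed by a THP, nothing to do */

	set_bit(AS_COLLAPSE_PMD, &compound_head(orig_page)->mapping->flags);
}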
@@ -1302,9 +1302,6 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
i_mmap_lock_write(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
- /* probably overkill */
- if (vma->anon_vma)
- continue;
addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
if (addr & ~HPAGE_PMD_MASK)
continue;
After all uprobes are removed from the huge page (with PTE pgtable), it
is possible to collapse the pmd and benefit from THP again. This patch
triggers the collapse by setting AS_COLLAPSE_PMD on the mapping;
khugepaged will then retract the page table.

The vma->anon_vma check is removed from retract_page_tables(). The
check was initially marked as "probably overkill", and the code works
well without it.

An issue in an earlier version was reported by the kbuild test robot.

Reported-by: kbuild test robot <lkp@intel.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
---
 kernel/events/uprobes.c | 6 +++++-
 mm/khugepaged.c         | 3 ---
 2 files changed, 5 insertions(+), 4 deletions(-)
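[Note: sketch under assumptions, not code from this series.] On the khugepaged side, a consumer of AS_COLLAPSE_PMD could test and clear the flag before walking the i_mmap tree via retract_page_tables(), so the flag acts as a one-shot collapse request. The wrapper name below is made up, and the two-argument retract_page_tables(mapping, pgoff) signature is assumed from mainline of that era:

/*
 * Sketch only: one-shot consumption of AS_COLLAPSE_PMD. The wrapper
 * name is hypothetical; retract_page_tables() is the existing
 * khugepaged helper shown in the hunk above.
 */
static void collapse_pmd_if_requested(struct address_space *mapping,
				      pgoff_t pgoff)
{
	if (test_and_clear_bit(AS_COLLAPSE_PMD, &mapping->flags))
		retract_page_tables(mapping, pgoff);
}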