@@ -153,7 +153,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = old_page,
+ .page = compound_head(old_page),
.vma = vma,
.address = addr,
};
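Note on the hunk above: with FOLL_SPLIT_PMD (switched to below), only the PMD mapping is split while the compound page stays intact, so the old_page returned by gup may be a tail page of a THP. page_vma_mapped_walk() has to start from the head page, and compound_head() returns its argument unchanged for ordinary pages, so applying it unconditionally is safe. The same reasoning is why the PageTransHuge() assertion can go away in the next hunk. A minimal sketch of the invariant being relied on (an illustration under those assumptions, not code from the patch; the helper name is hypothetical):

    /*
     * Sketch only: pick the page to hand to page_vma_mapped_walk()
     * when old_page may be a THP tail page. compound_head() is an
     * identity for non-compound pages.
     */
    static struct page *pvmw_target_page(struct page *old_page)
    {
            return compound_head(old_page);
    }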
@@ -165,8 +165,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
addr + PAGE_SIZE);

- VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);
-
if (!orig) {
err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
&memcg, false);
@@ -188,7 +186,9 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,

get_page(new_page);
if (orig) {
+ lock_page(new_page); /* for page_add_file_rmap() */
page_add_file_rmap(new_page, false);
+ unlock_page(new_page);
inc_mm_counter(mm, mm_counter_file(new_page));
dec_mm_counter(mm, MM_ANONPAGES);
} else {
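In the orig branch above, the original file-backed page is being put back, so the rmap and the per-mm counters move from anonymous back to file-backed accounting. Taking the page lock around the rmap insertion matches the patch's own comment; file pages ordinarily have their rmap established while locked. An annotated restatement of the hunk for clarity (the comments are mine, not from the patch):

    lock_page(new_page);                    /* hold page lock across rmap insertion */
    page_add_file_rmap(new_page, false);    /* false: mapped as a base page, not compound */
    unlock_page(new_page);
    inc_mm_counter(mm, mm_counter_file(new_page)); /* count as file-backed */
    dec_mm_counter(mm, MM_ANONPAGES);              /* drop the anon accounting */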
@@ -482,7 +482,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,

retry:
/* Read the page with vaddr into memory */
ret = get_user_pages_remote(NULL, mm, vaddr, 1,
- FOLL_FORCE | FOLL_SPLIT, &old_page, &vma, NULL);
+ FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
if (ret <= 0)
return ret;
This patch uses the newly added FOLL_SPLIT_PMD in uprobes. This makes it
easy to regroup the huge pmd after the uprobe is disabled (in the next
patch).

Signed-off-by: Song Liu <songliubraving@fb.com>
---
 kernel/events/uprobes.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
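For contrast, a sketch of what the flag change in uprobe_write_opcode() means (illustration only, not part of the patch): FOLL_SPLIT splits the transparent huge page itself into base pages, while FOLL_SPLIT_PMD splits only the PMD table entry into PTEs and leaves the compound page intact, which is what allows the huge pmd to be regrouped later.

    struct page *page;
    struct vm_area_struct *vma;
    long ret;

    /* Old behaviour: the THP itself is split into base pages. */
    ret = get_user_pages_remote(NULL, mm, vaddr, 1,
                    FOLL_FORCE | FOLL_SPLIT, &page, &vma, NULL);

    /* New behaviour: only the PMD mapping is split into PTEs; the
     * underlying compound page is left intact. */
    ret = get_user_pages_remote(NULL, mm, vaddr, 1,
                    FOLL_FORCE | FOLL_SPLIT_PMD, &page, &vma, NULL);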