Message ID: 20240403083805.1818160-5-wangkefeng.wang@huawei.com (mailing list archive)
State: New
Series: arch/mm/fault: accelerate pagefault when badaccess
Kefeng Wang <wangkefeng.wang@huawei.com> writes:
> The access_[pkey]_error() checks on the vma are already done under the
> per-VMA lock; if it is a bad access, handle the error directly, with no
> need to retry with mmap_lock again. In order to release the correct
> lock, pass the mm_struct into bad_access_pkey()/bad_access(): if mm is
> NULL, release the vma lock, otherwise release the mmap_lock. Since the
> page fault is handled under the per-VMA lock, count it as a vma lock
> event with VMA_LOCK_SUCCESS.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  arch/powerpc/mm/fault.c | 33 ++++++++++++++++++++-------------
>  1 file changed, 20 insertions(+), 13 deletions(-)

I thought there might be a nicer way to do this; plumbing the mm and vma
down through all those levels is a bit of a pain (vma->vm_mm exists,
after all). But I couldn't come up with anything obviously better
without doing lots of refactoring first, which would be a pain to
integrate into this series.

So anyway, if the series goes ahead:

Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)

cheers

> diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
> index 53335ae21a40..215690452495 100644
> --- a/arch/powerpc/mm/fault.c
> +++ b/arch/powerpc/mm/fault.c
> @@ -71,23 +71,26 @@ static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long add
>  	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
>  }
>  
> -static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
> +static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code,
> +		      struct mm_struct *mm, struct vm_area_struct *vma)
>  {
> -	struct mm_struct *mm = current->mm;
>  
>  	/*
>  	 * Something tried to access memory that isn't in our memory map..
>  	 * Fix it, but check if it's kernel or user first..
>  	 */
> -	mmap_read_unlock(mm);
> +	if (mm)
> +		mmap_read_unlock(mm);
> +	else
> +		vma_end_read(vma);
>  
>  	return __bad_area_nosemaphore(regs, address, si_code);
>  }
>  
>  static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
> +				    struct mm_struct *mm,
>  				    struct vm_area_struct *vma)
>  {
> -	struct mm_struct *mm = current->mm;
>  	int pkey;
>  
>  	/*
> @@ -109,7 +112,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
>  	 */
>  	pkey = vma_pkey(vma);
>  
> -	mmap_read_unlock(mm);
> +	if (mm)
> +		mmap_read_unlock(mm);
> +	else
> +		vma_end_read(vma);
>  
>  	/*
>  	 * If we are in kernel mode, bail out with a SEGV, this will
> @@ -124,9 +130,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
>  	return 0;
>  }
>  
> -static noinline int bad_access(struct pt_regs *regs, unsigned long address)
> +static noinline int bad_access(struct pt_regs *regs, unsigned long address,
> +			       struct mm_struct *mm, struct vm_area_struct *vma)
>  {
> -	return __bad_area(regs, address, SEGV_ACCERR);
> +	return __bad_area(regs, address, SEGV_ACCERR, mm, vma);
>  }
>  
>  static int do_sigbus(struct pt_regs *regs, unsigned long address,
> @@ -479,13 +486,13 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
>  
>  	if (unlikely(access_pkey_error(is_write, is_exec,
>  				       (error_code & DSISR_KEYFAULT), vma))) {
> -		vma_end_read(vma);
> -		goto lock_mmap;
> +		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
> +		return bad_access_pkey(regs, address, NULL, vma);
>  	}
>  
>  	if (unlikely(access_error(is_write, is_exec, vma))) {
> -		vma_end_read(vma);
> -		goto lock_mmap;
> +		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
> +		return bad_access(regs, address, NULL, vma);
>  	}
>  
>  	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
> @@ -521,10 +528,10 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
>  
>  	if (unlikely(access_pkey_error(is_write, is_exec,
>  				       (error_code & DSISR_KEYFAULT), vma)))
> -		return bad_access_pkey(regs, address, vma);
> +		return bad_access_pkey(regs, address, mm, vma);
>  
>  	if (unlikely(access_error(is_write, is_exec, vma)))
> -		return bad_access(regs, address);
> +		return bad_access(regs, address, mm, vma);
>  
>  	/*
>  	 * If for any reason at all we couldn't handle the fault,
> -- 
> 2.27.0
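
A note on the locking convention: the patch encodes "which lock is held"
in the mm argument itself, with mm == NULL meaning the fault was handled
under the per-VMA lock. The alternative mpe alludes to above would
derive the mm from vma->vm_mm and pass only a flag; a minimal sketch of
that shape (hypothetical, with a made-up helper name, not what the patch
implements):

/*
 * Hypothetical sketch only, not part of the patch: since vma->vm_mm is
 * always available, the mm would not need to be plumbed through; a flag
 * saying which lock is held would suffice.
 */
static void bad_fault_unlock(struct vm_area_struct *vma, bool under_vma_lock)
{
	if (under_vma_lock)
		vma_end_read(vma);		/* drop the per-VMA read lock */
	else
		mmap_read_unlock(vma->vm_mm);	/* drop the mmap read lock */
}

Either shape requires the caller to say which lock it holds; the series
expresses that through the mm argument rather than a separate flag,
which keeps the existing call signatures closer to what they were.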