[RFC,11/24] x86 fault handler: merge bad_area() functions

Message ID 20200224203057.162467-12-walken@google.com
State New, archived
Series Fine grained MM locking

Commit Message

Michel Lespinasse Feb. 24, 2020, 8:30 p.m. UTC
This merges the bad_area(), bad_area_access_error(), and the underlying
__bad_area() functions into a single unified function.

Passing a NULL vma triggers the prior bad_area() behavior, while
passing a non-NULL vma triggers the prior bad_area_access_error() behavior.
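
For example, the call sites in do_user_addr_fault() (shown in full in
the patch below) now take the form:

	bad_area(regs, hw_error_code, address, NULL);	/* no suitable vma */
	bad_area(regs, hw_error_code, address, vma);	/* access error on vma */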

The control flow is very similar in all cases, and we now release the
mmap_sem read lock in a single place rather than three.
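
In outline, the merged function reads as follows. This is just a
sketch condensed from the patch below, with the pkey comments
abbreviated; mm_read_unlock() is the mmap locking wrapper introduced
earlier in this series:

	static noinline void
	bad_area(struct pt_regs *regs, unsigned long error_code,
		 unsigned long address, struct vm_area_struct *vma)
	{
		u32 pkey = 0;
		int si_code = SEGV_MAPERR;

		if (!vma)
			goto unlock;	/* prior bad_area() behavior */

		if (bad_area_access_from_pkeys(error_code, vma)) {
			pkey = vma_pkey(vma);	/* protection key fault */
			si_code = SEGV_PKUERR;
		} else {
			si_code = SEGV_ACCERR;	/* other access error */
		}

	unlock:
		/* the single place where the read lock is released */
		mm_read_unlock(current->mm);
		__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
	}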

This reduces text size by 356 bytes.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 arch/x86/mm/fault.c | 54 ++++++++++++++++++++-------------------------
 1 file changed, 24 insertions(+), 30 deletions(-)

Patch

diff --git arch/x86/mm/fault.c arch/x86/mm/fault.c
index a8ce9e160b72..adbd2b03fcf9 100644
--- arch/x86/mm/fault.c
+++ arch/x86/mm/fault.c
@@ -919,26 +919,6 @@  bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
 }
 
-static void
-__bad_area(struct pt_regs *regs, unsigned long error_code,
-	   unsigned long address, u32 pkey, int si_code)
-{
-	struct mm_struct *mm = current->mm;
-	/*
-	 * Something tried to access memory that isn't in our memory map..
-	 * Fix it, but check if it's kernel or user first..
-	 */
-	mm_read_unlock(mm);
-
-	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
-}
-
-static noinline void
-bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
-{
-	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
-}
-
 static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 		struct vm_area_struct *vma)
 {
@@ -957,9 +937,15 @@  static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 }
 
 static noinline void
-bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
-		      unsigned long address, struct vm_area_struct *vma)
+bad_area(struct pt_regs *regs, unsigned long error_code,
+	 unsigned long address, struct vm_area_struct *vma)
 {
+	u32 pkey = 0;
+	int si_code = SEGV_MAPERR;
+
+	if (!vma)
+		goto unlock;
+
 	/*
 	 * This OSPKE check is not strictly necessary at runtime.
 	 * But, doing it this way allows compiler optimizations
@@ -986,12 +972,20 @@  bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 		 *	     faulted on a pte with its pkey=4.
 		 */
-		u32 pkey = vma_pkey(vma);
-
-		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
+		pkey = vma_pkey(vma);
+		si_code = SEGV_PKUERR;
 	} else {
-		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
+		si_code = SEGV_ACCERR;
 	}
+
+unlock:
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+	mm_read_unlock(current->mm);
+
+	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
 }
 
 static void
@@ -1401,17 +1395,17 @@  void do_user_addr_fault(struct pt_regs *regs,
 
 	vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
-		bad_area(regs, hw_error_code, address);
+		bad_area(regs, hw_error_code, address, NULL);
 		return;
 	}
 	if (likely(vma->vm_start <= address))
 		goto good_area;
 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, hw_error_code, address);
+		bad_area(regs, hw_error_code, address, NULL);
 		return;
 	}
 	if (unlikely(expand_stack(vma, address))) {
-		bad_area(regs, hw_error_code, address);
+		bad_area(regs, hw_error_code, address, NULL);
 		return;
 	}
 
@@ -1421,7 +1415,7 @@  void do_user_addr_fault(struct pt_regs *regs,
 	 */
 good_area:
 	if (unlikely(access_error(hw_error_code, vma))) {
-		bad_area_access_error(regs, hw_error_code, address, vma);
+		bad_area(regs, hw_error_code, address, vma);
 		return;
 	}