--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -33,6 +33,9 @@ do { \
         (b)->data = (tmp).data; \
 } while (0)
 
+bool extable_insn_may_access_user(const struct exception_table_entry *ex,
+                                  unsigned long esr);
+
 #ifdef CONFIG_BPF_JIT
 bool ex_handler_bpf(const struct exception_table_entry *ex,
                     struct pt_regs *regs);
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -20,6 +20,17 @@ static bool cpy_faulted_on_uaccess(const struct exception_table_entry *ex,
         return !(uaccess_is_write ^ fault_on_write);
 }
 
+bool extable_insn_may_access_user(const struct exception_table_entry *ex,
+                                  unsigned long esr)
+{
+        switch (ex->type) {
+        case EX_TYPE_UACCESS_CPY:
+                return cpy_faulted_on_uaccess(ex, esr);
+        default:
+                return true;
+        }
+}
+
 static inline unsigned long
 get_ex_fixup(const struct exception_table_entry *ex)
 {
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -553,6 +553,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
         unsigned int mm_flags = FAULT_FLAG_DEFAULT;
         unsigned long addr = untagged_addr(far);
         struct vm_area_struct *vma;
+        const struct exception_table_entry *ex;
         int si_code;
         int pkey = -1;
 
@@ -606,7 +607,8 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
                         die_kernel_fault("execution of user memory",
                                          addr, esr, regs);
 
-                if (!search_exception_tables(regs->pc))
+                ex = search_exception_tables(regs->pc);
+                if (!ex || !extable_insn_may_access_user(ex, esr))
                         die_kernel_fault("access to user memory outside uaccess routines",
                                          addr, esr, regs);
         }

A subsequent patch will use CPY* instructions to copy between user and
kernel memory. Add handling for PAN faults caused by an intended kernel
memory access erroneously accessing user memory, in order to make it
easier to debug kernel bugs and to keep the same behavior as with
regular loads/stores.

Signed-off-by: Kristina Martšenko <kristina.martsenko@arm.com>
---
 arch/arm64/include/asm/extable.h |  3 +++
 arch/arm64/mm/extable.c          | 11 +++++++++++
 arch/arm64/mm/fault.c            |  4 +++-
 3 files changed, 17 insertions(+), 1 deletion(-)
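
For readers unfamiliar with the EX_TYPE_UACCESS_CPY case: cpy_faulted_on_uaccess(),
whose final return statement appears in the extable.c context above, reports whether
the faulting access direction matches the side of the copy that is allowed to touch
user memory. The standalone sketch below models that decision outside the kernel;
SKETCH_ESR_WNR, struct sketch_ex_entry and main() are illustrative assumptions, with
only the xor comparison taken from the hunk above and the write-not-read (WNR) bit
taken from the ESR_ELx encoding.

/*
 * Standalone userspace sketch (not the kernel implementation) of the
 * decision made for EX_TYPE_UACCESS_CPY entries: the fault is treated
 * as a legitimate uaccess fault only when the faulting access direction
 * (read or write, per the ESR write-not-read bit) matches the operand
 * of the copy that is allowed to touch user memory.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_ESR_WNR (1UL << 6)  /* "write, not read", as in ESR_ELx.WNR */

struct sketch_ex_entry {
        bool uaccess_is_write;     /* does the copy write to user memory? */
};

static bool cpy_faulted_on_uaccess(const struct sketch_ex_entry *ex,
                                   unsigned long esr)
{
        bool fault_on_write = esr & SKETCH_ESR_WNR;

        /* Same shape as the check shown in the extable.c hunk above. */
        return !(ex->uaccess_is_write ^ fault_on_write);
}

static bool extable_insn_may_access_user(const struct sketch_ex_entry *ex,
                                         unsigned long esr)
{
        /* In this sketch every entry is an EX_TYPE_UACCESS_CPY entry. */
        return cpy_faulted_on_uaccess(ex, esr);
}

int main(void)
{
        struct sketch_ex_entry to_user   = { .uaccess_is_write = true };
        struct sketch_ex_entry from_user = { .uaccess_is_write = false };

        /* Write fault while copying to user memory: fixup applies -> 1 */
        printf("%d\n", extable_insn_may_access_user(&to_user, SKETCH_ESR_WNR));

        /*
         * Write fault while copying *from* user memory: the kernel-side
         * destination hit user memory under PAN, so the fault handler
         * should report a kernel bug instead -> 0
         */
        printf("%d\n", extable_insn_may_access_user(&from_user, SKETCH_ESR_WNR));

        return 0;
}

With the fault.c change above, a false result from extable_insn_may_access_user()
means the kernel-side operand of the copy faulted on user memory under PAN, so
do_page_fault() calls die_kernel_fault() just as it would for a stray regular load
or store.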