
[v12,06/10] arm64: Treat all entry code as non-kprobe-able

Message ID 1461783185-9056-7-git-send-email-dave.long@linaro.org (mailing list archive)
State New, archived

Commit Message

David Long April 27, 2016, 6:53 p.m. UTC
From: Pratyush Anand <panand@redhat.com>

Entry symbols are not kprobe safe. So blacklist them for kprobing.

Signed-off-by: Pratyush Anand <panand@redhat.com>
---
 arch/arm64/kernel/entry.S       |  3 +++
 arch/arm64/kernel/kprobes.c     | 10 ++++++++++
 arch/arm64/kernel/vmlinux.lds.S |  1 +
 3 files changed, 14 insertions(+)
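
For context, the effect of moving these symbols into .entry.text is that register_kprobe()
will now refuse them. A minimal test-module sketch (illustrative only, not part of the patch;
sys_rt_sigreturn_wrapper is used simply because the hunk below shows it ends up inside the
new section, and any symbol between __entry_text_start and __entry_text_end behaves the same):

#include <linux/module.h>
#include <linux/kprobes.h>

static struct kprobe kp = {
	.symbol_name = "sys_rt_sigreturn_wrapper",
};

static int __init entry_blacklist_demo_init(void)
{
	int ret = register_kprobe(&kp);

	/* Expected: -EINVAL, because the address falls within the kprobe blacklist. */
	pr_info("register_kprobe(%s) returned %d\n", kp.symbol_name, ret);
	if (!ret)
		unregister_kprobe(&kp);
	return 0;
}

static void __exit entry_blacklist_demo_exit(void)
{
}

module_init(entry_blacklist_demo_init);
module_exit(entry_blacklist_demo_exit);
MODULE_LICENSE("GPL");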

Comments

James Morse May 12, 2016, 2:49 p.m. UTC | #1
Hi David, Pratyush

On 27/04/16 19:53, David Long wrote:
> From: Pratyush Anand <panand@redhat.com>
> 
> Entry symbols are not kprobe safe. So blacklist them for kprobing.
> 
> Signed-off-by: Pratyush Anand <panand@redhat.com>

> diff --git a/arch/arm64/kernel/kprobes.c b/arch/arm64/kernel/kprobes.c
> index dfa1b1f..6a1292b 100644
> --- a/arch/arm64/kernel/kprobes.c
> +++ b/arch/arm64/kernel/kprobes.c
> @@ -29,6 +29,7 @@
>  #include <asm/system_misc.h>
>  #include <asm/insn.h>
>  #include <asm/uaccess.h>
> +#include <asm-generic/sections.h>
>  
>  #include "kprobes-arm64.h"
>  
> @@ -514,6 +515,15 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
>  	return 1;
>  }
>  
> +bool arch_within_kprobe_blacklist(unsigned long addr)
> +{
> +	return  (addr >= (unsigned long)__kprobes_text_start &&
> +		 addr < (unsigned long)__kprobes_text_end) ||
> +		(addr >= (unsigned long)__entry_text_start &&
> +		 addr < (unsigned long)__entry_text_end) ||
> +		 !!search_exception_tables(addr);
> +}
> +

Looking at __kvm_hyp_vector, we don't have support for handling breakpoints at
EL2, so we should forbid kprobing these address ranges too:
__hyp_text_start -> __hyp_text_end
__hyp_idmap_text_start -> __hyp_idmap_text_end

These can probably be guarded with is_kernel_in_hyp_mode(): if this is true then
we are running with VHE, where this code runs at the same exception level as the
rest of the kernel, so we can probe them. (In this case you may want to add
'eret' to aarch64_insn_is_branch() in patch 2.)


Probing things in the kernel idmap sounds dangerous! Let's blacklist that too:
__idmap_text_start -> __idmap_text_end
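
Put together, the check might end up looking something like the sketch below
(a sketch only, assuming the hyp/idmap linker-script symbols are made visible
to kprobes.c, e.g. via local extern declarations or a header, and that
<asm/virt.h> is included for is_kernel_in_hyp_mode()):

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	if ((addr >= (unsigned long)__kprobes_text_start &&
	     addr < (unsigned long)__kprobes_text_end) ||
	    (addr >= (unsigned long)__entry_text_start &&
	     addr < (unsigned long)__entry_text_end) ||
	    (addr >= (unsigned long)__idmap_text_start &&
	     addr < (unsigned long)__idmap_text_end) ||
	    !!search_exception_tables(addr))
		return true;

	if (!is_kernel_in_hyp_mode()) {
		/* Without VHE this code runs at EL2, where a kernel
		 * breakpoint cannot be handled, so forbid probing it. */
		if ((addr >= (unsigned long)__hyp_text_start &&
		     addr < (unsigned long)__hyp_text_end) ||
		    (addr >= (unsigned long)__hyp_idmap_text_start &&
		     addr < (unsigned long)__hyp_idmap_text_end))
			return true;
	}

	return false;
}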



Thanks,

James
David Long May 20, 2016, 5:28 a.m. UTC | #2
On 05/12/2016 10:49 AM, James Morse wrote:
> Hi David, Pratyush
>
> On 27/04/16 19:53, David Long wrote:
>> From: Pratyush Anand <panand@redhat.com>
>>
>> Entry symbols are not kprobe safe. So blacklist them for kprobing.
>>
>> Signed-off-by: Pratyush Anand <panand@redhat.com>
>
>> diff --git a/arch/arm64/kernel/kprobes.c b/arch/arm64/kernel/kprobes.c
>> index dfa1b1f..6a1292b 100644
>> --- a/arch/arm64/kernel/kprobes.c
>> +++ b/arch/arm64/kernel/kprobes.c
>> @@ -29,6 +29,7 @@
>>   #include <asm/system_misc.h>
>>   #include <asm/insn.h>
>>   #include <asm/uaccess.h>
>> +#include <asm-generic/sections.h>
>>
>>   #include "kprobes-arm64.h"
>>
>> @@ -514,6 +515,15 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
>>   	return 1;
>>   }
>>
>> +bool arch_within_kprobe_blacklist(unsigned long addr)
>> +{
>> +	return  (addr >= (unsigned long)__kprobes_text_start &&
>> +		 addr < (unsigned long)__kprobes_text_end) ||
>> +		(addr >= (unsigned long)__entry_text_start &&
>> +		 addr < (unsigned long)__entry_text_end) ||
>> +		 !!search_exception_tables(addr);
>> +}
>> +
>
> Looking at __kvm_hyp_vector, we don't have support for handling breakpoints at
> EL2, so we should forbid kprobing these address ranges too:
> __hyp_text_start -> __hyp_text_end
> __hyp_idmap_text_start -> __hyp_idmap_text_end
>
> These can probably be guarded with is_kernel_in_hyp_mode(): if this is true then
> we are running with VHE, where this code runs at the same exception level as the
> rest of the kernel, so we can probe them. (In this case you may want to add
> 'eret' to aarch64_insn_is_branch() in patch 2.)
>

OK.

>
> Probing things in the kernel idmap sounds dangerous! Let's blacklist that too:
> __idmap_text_start -> __idmap_text_end
>

OK.

>
>
> Thanks,
>
> James
>
David Long May 26, 2016, 3:26 p.m. UTC | #3
On 05/12/2016 10:49 AM, James Morse wrote:
> Hi David, Pratyush
>
> On 27/04/16 19:53, David Long wrote:
>> From: Pratyush Anand <panand@redhat.com>
>>
>> Entry symbols are not kprobe safe. So blacklist them for kprobing.
>>
>> Signed-off-by: Pratyush Anand <panand@redhat.com>
>
>> diff --git a/arch/arm64/kernel/kprobes.c b/arch/arm64/kernel/kprobes.c
>> index dfa1b1f..6a1292b 100644
>> --- a/arch/arm64/kernel/kprobes.c
>> +++ b/arch/arm64/kernel/kprobes.c
>> @@ -29,6 +29,7 @@
>>   #include <asm/system_misc.h>
>>   #include <asm/insn.h>
>>   #include <asm/uaccess.h>
>> +#include <asm-generic/sections.h>
>>
>>   #include "kprobes-arm64.h"
>>
>> @@ -514,6 +515,15 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
>>   	return 1;
>>   }
>>
>> +bool arch_within_kprobe_blacklist(unsigned long addr)
>> +{
>> +	return  (addr >= (unsigned long)__kprobes_text_start &&
>> +		 addr < (unsigned long)__kprobes_text_end) ||
>> +		(addr >= (unsigned long)__entry_text_start &&
>> +		 addr < (unsigned long)__entry_text_end) ||
>> +		 !!search_exception_tables(addr);
>> +}
>> +
>
> Looking at __kvm_hyp_vector, we don't have support for handling breakpoints at
> EL2, so we should forbid kprobing these address ranges too:
> __hyp_text_start -> __hyp_text_end
> __hyp_idmap_text_start -> __hyp_idmap_text_end
>
> These can probably be guarded with is_kernel_in_hyp_mode(): if this is true then
> we are running with VHE, where this code runs at the same exception level as the
> rest of the kernel, so we can probe them. (In this case you may want to add
> 'eret' to aarch64_insn_is_branch() in patch 2.)
>
>
> Probing things in the kernel idmap sounds dangerous! Let's blacklist that too:
> __idmap_text_start -> __idmap_text_end
>

I've made these changes. I noticed there are no include-file definitions 
for these symbols, so I've added local extern declarations in 
arch_within_kprobe_blacklist().
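
For illustration, a block-scope extern declaration of a linker-script symbol
looks like this (a sketch; in_idmap_text() is a hypothetical helper name used
only for the example, and the declarations can just as well sit directly
inside arch_within_kprobe_blacklist()):

static bool in_idmap_text(unsigned long addr)
{
	/* Defined by the linker script (vmlinux.lds.S) rather than declared in
	 * a header, so a block-scope extern keeps the declaration local to the
	 * code that needs it; the hyp symbols can be handled the same way. */
	extern char __idmap_text_start[], __idmap_text_end[];

	return addr >= (unsigned long)__idmap_text_start &&
	       addr < (unsigned long)__idmap_text_end;
}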

Thanks,
-dl

Patch

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 12e8d2b..7d99bed 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -243,6 +243,7 @@  tsk	.req	x28		// current thread_info
  * Exception vectors.
  */
 
+	.pushsection ".entry.text", "ax"
 	.align	11
 ENTRY(vectors)
 	ventry	el1_sync_invalid		// Synchronous EL1t
@@ -781,3 +782,5 @@  ENTRY(sys_rt_sigreturn_wrapper)
 	mov	x0, sp
 	b	sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
+
+	.popsection
diff --git a/arch/arm64/kernel/kprobes.c b/arch/arm64/kernel/kprobes.c
index dfa1b1f..6a1292b 100644
--- a/arch/arm64/kernel/kprobes.c
+++ b/arch/arm64/kernel/kprobes.c
@@ -29,6 +29,7 @@ 
 #include <asm/system_misc.h>
 #include <asm/insn.h>
 #include <asm/uaccess.h>
+#include <asm-generic/sections.h>
 
 #include "kprobes-arm64.h"
 
@@ -514,6 +515,15 @@  int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 1;
 }
 
+bool arch_within_kprobe_blacklist(unsigned long addr)
+{
+	return  (addr >= (unsigned long)__kprobes_text_start &&
+		 addr < (unsigned long)__kprobes_text_end) ||
+		(addr >= (unsigned long)__entry_text_start &&
+		 addr < (unsigned long)__entry_text_end) ||
+		 !!search_exception_tables(addr);
+}
+
 int __init arch_init_kprobes(void)
 {
 	return 0;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index e205789..fb68ff8 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -104,6 +104,7 @@  SECTIONS
 			__exception_text_end = .;
 			IRQENTRY_TEXT
 			SOFTIRQENTRY_TEXT
+			ENTRY_TEXT
 			TEXT_TEXT
 			SCHED_TEXT
 			LOCK_TEXT