
arm64: kasan: instrument user memory access API

Message ID 1464288231-11304-1-git-send-email-yang.shi@linaro.org (mailing list archive)
State New, archived

Commit Message

Yang Shi May 26, 2016, 6:43 p.m. UTC
The upstream commit 1771c6e1a567ea0ba2cccc0a4ffe68a1419fd8ef
("x86/kasan: instrument user memory access API") added KASAN instrumentation
to the x86 user memory access API, so add the same instrumentation to arm64.

Tested with the test_kasan module.

Signed-off-by: Yang Shi <yang.shi@linaro.org>
---
 arch/arm64/include/asm/uaccess.h | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)
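
As a brief illustration (not part of the patch), the kasan_check_write(to, n)
added to copy_from_user() means an undersized kernel destination buffer is
reported by KASAN before any user data is copied. The handler and buffer
names below are hypothetical:

	#include <linux/slab.h>
	#include <linux/uaccess.h>

	static long hypothetical_handler(void __user *arg)
	{
		char *buf = kmalloc(8, GFP_KERNEL);
		long ret = 0;

		if (!buf)
			return -ENOMEM;

		/*
		 * n = 16 is larger than the 8-byte allocation, so the
		 * kasan_check_write(buf, 16) in copy_from_user() reports
		 * an out-of-bounds write before the copy runs.
		 */
		if (copy_from_user(buf, arg, 16))
			ret = -EFAULT;

		kfree(buf);
		return ret;
	}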

Comments

Andrey Ryabinin May 27, 2016, 11:02 a.m. UTC | #1
On 05/26/2016 09:43 PM, Yang Shi wrote:
> The upstream commit 1771c6e1a567ea0ba2cccc0a4ffe68a1419fd8ef
> ("x86/kasan: instrument user memory access API") added KASAN instrument to
> x86 user memory access API, so added such instrument to ARM64 too.
> 
> Tested by test_kasan module.
> 
> Signed-off-by: Yang Shi <yang.shi@linaro.org>
> ---
>  arch/arm64/include/asm/uaccess.h | 18 ++++++++++++++++--
>  1 file changed, 16 insertions(+), 2 deletions(-)

Please, cover __copy_from_user() and __copy_to_user() too.
Unlike x86, your patch doesn't instrument these two.
Mark Rutland May 27, 2016, 12:38 p.m. UTC | #2
Hi,

On Thu, May 26, 2016 at 11:43:51AM -0700, Yang Shi wrote:
> The upstream commit 1771c6e1a567ea0ba2cccc0a4ffe68a1419fd8ef
> ("x86/kasan: instrument user memory access API") added KASAN instrument to
> x86 user memory access API, so added such instrument to ARM64 too.
> 
> Tested by test_kasan module.

I just gave this a go atop the current HEAD (dc03c0f9d12d8528) on a
Juno R1 board. I hit the expected exceptions when using the test_kasan
module (once I remembered to rebuild it), and things seem to run
smoothly otherwise.

I don't see any build issues with !CONFIG_KASAN, and the patch itself
looks right to me.

So FWIW:

Acked-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>

As an aside, it's a shame that each architecture has to duplicate this
logic, rather than having something in the generic code like:

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	return arch_copy_from_user(to, from, n);
}

Thanks,
Mark.

> 
> Signed-off-by: Yang Shi <yang.shi@linaro.org>
> ---
>  arch/arm64/include/asm/uaccess.h | 18 ++++++++++++++++--
>  1 file changed, 16 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
> index 0685d74..ec352fa 100644
> --- a/arch/arm64/include/asm/uaccess.h
> +++ b/arch/arm64/include/asm/uaccess.h
> @@ -23,6 +23,7 @@
>   */
>  #include <linux/string.h>
>  #include <linux/thread_info.h>
> +#include <linux/kasan-checks.h>
>  
>  #include <asm/alternative.h>
>  #include <asm/cpufeature.h>
> @@ -276,6 +277,8 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
>  
>  static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
>  {
> +	kasan_check_write(to, n);
> +
>  	if (access_ok(VERIFY_READ, from, n))
>  		n = __copy_from_user(to, from, n);
>  	else /* security hole - plug it */
> @@ -285,6 +288,8 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
>  
>  static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
>  {
> +	kasan_check_read(from, n);
> +
>  	if (access_ok(VERIFY_WRITE, to, n))
>  		n = __copy_to_user(to, from, n);
>  	return n;
> @@ -297,8 +302,17 @@ static inline unsigned long __must_check copy_in_user(void __user *to, const voi
>  	return n;
>  }
>  
> -#define __copy_to_user_inatomic __copy_to_user
> -#define __copy_from_user_inatomic __copy_from_user
> +static inline unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
> +{
> +	kasan_check_read(from, n);
> +	return __copy_to_user(to, from, n);
> +}
> +
> +static inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
> +{
> +	kasan_check_write(to, n);
> +	return __copy_from_user(to, from, n);
> +}
>  
>  static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
>  {
> -- 
> 2.0.2
> 
> 
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
>
Yang Shi May 27, 2016, 4:34 p.m. UTC | #3
On 5/27/2016 4:02 AM, Andrey Ryabinin wrote:
>
>
> On 05/26/2016 09:43 PM, Yang Shi wrote:
>> The upstream commit 1771c6e1a567ea0ba2cccc0a4ffe68a1419fd8ef
>> ("x86/kasan: instrument user memory access API") added KASAN instrument to
>> x86 user memory access API, so added such instrument to ARM64 too.
>>
>> Tested by test_kasan module.
>>
>> Signed-off-by: Yang Shi <yang.shi@linaro.org>
>> ---
>>  arch/arm64/include/asm/uaccess.h | 18 ++++++++++++++++--
>>  1 file changed, 16 insertions(+), 2 deletions(-)
>
> Please, cover __copy_from_user() and __copy_to_user() too.
> Unlike x86, your patch doesn't instrument these two.

I should have elaborated on this in my review. Yes, I did think about it,
but unlike x86, __copy_to/from_user are implemented in assembly on ARM64.
If I add kasan_check_read/write into them, I have to shuffle registers
around to set up the parameters for the kasan calls, then restore them
after the call, for example the code below for __copy_to_user:

         mov     x9, x0                  // save dst ('to')
         mov     x10, x1                 // save src ('from')
         mov     x11, x2                 // save len ('n')
         mov     x0, x10                 // arg0 = from
         mov     x1, x11                 // arg1 = n
         bl      kasan_check_read
         mov     x0, x9                  // restore dst
         mov     x1, x10                 // restore src


So, I'm wondering whether it is worth it, since __copy_to/from_user are
only called in a handful of places, e.g. sctp and a couple of drivers,
and are not used much. Actually, I think some of those callers could be
replaced by __copy_to/from_user_inatomic.

Any idea is appreciated.

Thanks,
Yang

>
Mark Rutland May 27, 2016, 5:46 p.m. UTC | #4
On Fri, May 27, 2016 at 09:34:03AM -0700, Shi, Yang wrote:
> On 5/27/2016 4:02 AM, Andrey Ryabinin wrote:
> >
> >
> >On 05/26/2016 09:43 PM, Yang Shi wrote:
> >>The upstream commit 1771c6e1a567ea0ba2cccc0a4ffe68a1419fd8ef
> >>("x86/kasan: instrument user memory access API") added KASAN instrument to
> >>x86 user memory access API, so added such instrument to ARM64 too.
> >>
> >>Tested by test_kasan module.
> >>
> >>Signed-off-by: Yang Shi <yang.shi@linaro.org>
> >>---
> >> arch/arm64/include/asm/uaccess.h | 18 ++++++++++++++++--
> >> 1 file changed, 16 insertions(+), 2 deletions(-)
> >
> >Please, cover __copy_from_user() and __copy_to_user() too.
> >Unlike x86, your patch doesn't instrument these two.

Argh, I missed those when reviewing. My bad.

> I should elaborated this in my review. Yes, I did think about it,
> but unlike x86, __copy_to/from_user are implemented by asm code on
> ARM64. If I add kasan_check_read/write into them, I have to move the
> registers around to prepare the parameters for kasan calls, then
> restore them after the call, for example the below code for
> __copy_to_user:
> 
>         mov     x9, x0
>         mov     x10, x1
>         mov     x11, x2
>         mov     x0, x10
>         mov     x1, x11
>         bl      kasan_check_read
>         mov     x0, x9
>         mov     x1, x10

There's no need to alter the assembly.

Rename the functions (e.g. have __arch_raw_copy_from_user), and add
static inline wrappers in uaccess.h that do the kasan calls before
calling the assembly functions.

That gives the compiler the freedom to do the right thing, and avoids
horrible ifdeffery in the assembly code.
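
For illustration, a minimal sketch of that approach (the __arch_raw_copy_*
names follow the example above and are not final):

	/* renamed assembly routines, unchanged apart from the name */
	extern unsigned long __arch_raw_copy_from_user(void *to,
			const void __user *from, unsigned long n);
	extern unsigned long __arch_raw_copy_to_user(void __user *to,
			const void *from, unsigned long n);

	/* thin inline wrappers in uaccess.h add the KASAN checks */
	static inline unsigned long __copy_from_user(void *to,
			const void __user *from, unsigned long n)
	{
		kasan_check_write(to, n);	/* 'to' is kernel memory */
		return __arch_raw_copy_from_user(to, from, n);
	}

	static inline unsigned long __copy_to_user(void __user *to,
			const void *from, unsigned long n)
	{
		kasan_check_read(from, n);	/* 'from' is kernel memory */
		return __arch_raw_copy_to_user(to, from, n);
	}

That way the checks live in C, the compiler handles the argument setup,
and the existing assembly needs no changes.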

> So, I'm wondering if it is worth or not since __copy_to/from_user
> are just called at a couple of places, i.e. sctp, a couple of
> drivers, etc and not used too much.

[mark@leverpostej:~/src/linux]% git grep -w __copy_to_user -- ^arch | wc -l
63
[mark@leverpostej:~/src/linux]% git grep -w __copy_from_user -- ^arch | wc -l
47

That's a reasonable number of callsites.

If we're going to bother adding this, it should be complete. So please
do update __copy_from_user and __copy_to_user.

> Actually, I think some of them
> could be replaced by __copy_to/from_user_inatomic.

Given the number of existing callers outside of arch code, I think we'll
get far more traction reworking the arm64 parts for now.

Thanks,
Mark.
Yang Shi May 27, 2016, 6:05 p.m. UTC | #5
On 5/27/2016 10:46 AM, Mark Rutland wrote:
> On Fri, May 27, 2016 at 09:34:03AM -0700, Shi, Yang wrote:
>> On 5/27/2016 4:02 AM, Andrey Ryabinin wrote:
>>>
>>>
>>> On 05/26/2016 09:43 PM, Yang Shi wrote:
>>>> The upstream commit 1771c6e1a567ea0ba2cccc0a4ffe68a1419fd8ef
>>>> ("x86/kasan: instrument user memory access API") added KASAN instrument to
>>>> x86 user memory access API, so added such instrument to ARM64 too.
>>>>
>>>> Tested by test_kasan module.
>>>>
>>>> Signed-off-by: Yang Shi <yang.shi@linaro.org>
>>>> ---
>>>> arch/arm64/include/asm/uaccess.h | 18 ++++++++++++++++--
>>>> 1 file changed, 16 insertions(+), 2 deletions(-)
>>>
>>> Please, cover __copy_from_user() and __copy_to_user() too.
>>> Unlike x86, your patch doesn't instrument these two.
>
> Argh, I missed those when reviewing. My bad.
>
>> I should elaborated this in my review. Yes, I did think about it,
>> but unlike x86, __copy_to/from_user are implemented by asm code on
>> ARM64. If I add kasan_check_read/write into them, I have to move the
>> registers around to prepare the parameters for kasan calls, then
>> restore them after the call, for example the below code for
>> __copy_to_user:
>>
>>         mov     x9, x0
>>         mov     x10, x1
>>         mov     x11, x2
>>         mov     x0, x10
>>         mov     x1, x11
>>         bl      kasan_check_read
>>         mov     x0, x9
>>         mov     x1, x10
>
> There's no need to alter the assembly.
>
> Rename the functions (e.g. have __arch_raw_copy_from_user), and add
> static inline wrappers in uaccess.h that do the kasan calls before
> calling the assembly functions.
>
> That gives the compiler the freedom to do the right thing, and avoids
> horrible ifdeffery in the assembly code.

Thanks for the suggestion, will address in v2.

Yang

>
>> So, I'm wondering if it is worth or not since __copy_to/from_user
>> are just called at a couple of places, i.e. sctp, a couple of
>> drivers, etc and not used too much.
>
> [mark@leverpostej:~/src/linux]% git grep -w __copy_to_user -- ^arch | wc -l
> 63
> [mark@leverpostej:~/src/linux]% git grep -w __copy_from_user -- ^arch | wc -l
> 47
>
> That's a reasonable number of callsites.
>
> If we're going to bother adding this, it should be complete. So please
> do update __copy_from_user and __copy_to_user.
>
>> Actually, I think some of them
>> could be replaced by __copy_to/from_user_inatomic.
>
> Given the number of existing callers outside of arch code, I think we'll
> get far more traction reworking the arm64 parts for now.
>
> Thanks,
> Mark.
>

Patch

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0685d74..ec352fa 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -23,6 +23,7 @@ 
  */
 #include <linux/string.h>
 #include <linux/thread_info.h>
+#include <linux/kasan-checks.h>
 
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
@@ -276,6 +277,8 @@  extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	kasan_check_write(to, n);
+
 	if (access_ok(VERIFY_READ, from, n))
 		n = __copy_from_user(to, from, n);
 	else /* security hole - plug it */
@@ -285,6 +288,8 @@  static inline unsigned long __must_check copy_from_user(void *to, const void __u
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	kasan_check_read(from, n);
+
 	if (access_ok(VERIFY_WRITE, to, n))
 		n = __copy_to_user(to, from, n);
 	return n;
@@ -297,8 +302,17 @@  static inline unsigned long __must_check copy_in_user(void __user *to, const voi
 	return n;
 }
 
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+static inline unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+{
+	kasan_check_read(from, n);
+	return __copy_to_user(to, from, n);
+}
+
+static inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+	kasan_check_write(to, n);
+	return __copy_from_user(to, from, n);
+}
 
 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 {