
[bpf-next] bpf: Check return from set_memory_rox() and friends

Message ID 63322c8e8454de9b240583de58cd730bc97bb789.1708165016.git.christophe.leroy@csgroup.eu (mailing list archive)
State New, archived
Series: [bpf-next] bpf: Check return from set_memory_rox() and friends

Commit Message

Christophe Leroy Feb. 17, 2024, 10:24 a.m. UTC
arch_protect_bpf_trampoline() and alloc_new_pack() call
set_memory_rox() which can fail, leading to unprotected memory.

Take into account return from set_memory_XX() functions and add
__must_check flag to arch_protect_bpf_trampoline().

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/x86/net/bpf_jit_comp.c    |  6 ++++--
 include/linux/bpf.h            |  4 ++--
 kernel/bpf/bpf_struct_ops.c    |  9 +++++++--
 kernel/bpf/core.c              | 25 +++++++++++++++++++------
 kernel/bpf/trampoline.c        | 18 ++++++++++++------
 net/bpf/bpf_dummy_struct_ops.c |  4 +++-
 6 files changed, 47 insertions(+), 19 deletions(-)

Comments

Kees Cook Feb. 17, 2024, 10:45 p.m. UTC | #1
On Sat, Feb 17, 2024 at 11:24:07AM +0100, Christophe Leroy wrote:
> arch_protect_bpf_trampoline() and alloc_new_pack() call
> set_memory_rox() which can fail, leading to unprotected memory.
> 
> Take into account return from set_memory_XX() functions and add
> __must_check flag to arch_protect_bpf_trampoline().
> 
> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>

Thanks for doing this! This seems to hit all the right error paths that
I can see.

Reviewed-by: Kees Cook <keescook@chromium.org>
Simon Horman Feb. 19, 2024, 10:19 a.m. UTC | #2
On Sat, Feb 17, 2024 at 11:24:07AM +0100, Christophe Leroy wrote:
> arch_protect_bpf_trampoline() and alloc_new_pack() call
> set_memory_rox() which can fail, leading to unprotected memory.
> 
> Take into account return from set_memory_XX() functions and add
> __must_check flag to arch_protect_bpf_trampoline().
> 
> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>

...

> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index ea6843be2616..23ce17da3bf7 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -898,23 +898,30 @@ static LIST_HEAD(pack_list);
>  static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
>  {
>  	struct bpf_prog_pack *pack;
> +	int err;
>  
>  	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
>  		       GFP_KERNEL);
>  	if (!pack)
>  		return NULL;
>  	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
> -	if (!pack->ptr) {
> -		kfree(pack);
> -		return NULL;
> -	}
> +	if (!pack->ptr)
> +		goto out;
>  	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
>  	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
>  	list_add_tail(&pack->list, &pack_list);

Hi Christophe,

Here pack is added to pack_list.

>  
>  	set_vm_flush_reset_perms(pack->ptr);
> -	set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
> +	err = set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
> +	if (err)
> +		goto out_free;

But this unwind path doesn't appear to remove pack from pack_list.

Flagged by Smatch.

>  	return pack;
> +
> +out_free:
> +	bpf_jit_free_exec(pack->ptr);
> +out:
> +	kfree(pack);
> +	return NULL;
>  }
>  
>  void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)

...
Christophe Leroy Feb. 19, 2024, 10:25 a.m. UTC | #3
On 19/02/2024 at 11:19, Simon Horman wrote:
> On Sat, Feb 17, 2024 at 11:24:07AM +0100, Christophe Leroy wrote:
>> arch_protect_bpf_trampoline() and alloc_new_pack() call
>> set_memory_rox() which can fail, leading to unprotected memory.
>>
>> Take into account return from set_memory_XX() functions and add
>> __must_check flag to arch_protect_bpf_trampoline().
>>
>> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> 
> ...
> 
>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>> index ea6843be2616..23ce17da3bf7 100644
>> --- a/kernel/bpf/core.c
>> +++ b/kernel/bpf/core.c
>> @@ -898,23 +898,30 @@ static LIST_HEAD(pack_list);
>>   static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
>>   {
>>   	struct bpf_prog_pack *pack;
>> +	int err;
>>   
>>   	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
>>   		       GFP_KERNEL);
>>   	if (!pack)
>>   		return NULL;
>>   	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
>> -	if (!pack->ptr) {
>> -		kfree(pack);
>> -		return NULL;
>> -	}
>> +	if (!pack->ptr)
>> +		goto out;
>>   	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
>>   	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
>>   	list_add_tail(&pack->list, &pack_list);
> 
> Hi Christophe,
> 
> Here pack is added to pack_list.
> 
>>   
>>   	set_vm_flush_reset_perms(pack->ptr);
>> -	set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
>> +	err = set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
>> +	if (err)
>> +		goto out_free;
> 
> But this unwind path doesn't appear to remove pack from pack_list.

Ah, thanks,

Indeed, I wondered about it but ignored it as I misread pack_list as 
pack->list, thinking it would implicitly go away when dropping pack.

I'll send a v2 in a few days once more people have reviewed it.

Christophe
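
For reference, a minimal sketch of what the corrected unwind could look like (this is not the v2 patch, which is not part of this thread): since the pack has already been linked into pack_list with list_add_tail(), the error path would also need to unlink it before freeing, e.g.:

	set_vm_flush_reset_perms(pack->ptr);
	err = set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
	if (err)
		goto out_free;
	return pack;

out_free:
	list_del(&pack->list);	/* undo the earlier list_add_tail() */
	bpf_jit_free_exec(pack->ptr);
out:
	kfree(pack);
	return NULL;

An alternative would be to call list_add_tail() only after set_memory_rox() has succeeded, so the error path never has to touch pack_list at all.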

Patch

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 919f647c740f..db05e0ba9f68 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -2780,12 +2780,14 @@  void arch_free_bpf_trampoline(void *image, unsigned int size)
 	bpf_prog_pack_free(image, size);
 }
 
-void arch_protect_bpf_trampoline(void *image, unsigned int size)
+int arch_protect_bpf_trampoline(void *image, unsigned int size)
 {
+	return 0;
 }
 
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int arch_unprotect_bpf_trampoline(void *image, unsigned int size)
 {
+	return 0;
 }
 
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e30100597d0a..169847ed1f8d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1112,8 +1112,8 @@  int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 				void *func_addr);
 void *arch_alloc_bpf_trampoline(unsigned int size);
 void arch_free_bpf_trampoline(void *image, unsigned int size);
-void arch_protect_bpf_trampoline(void *image, unsigned int size);
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
+int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
+int arch_unprotect_bpf_trampoline(void *image, unsigned int size);
 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
 			     struct bpf_tramp_links *tlinks, void *func_addr);
 
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 02068bd0e4d9..7638a735f48f 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -522,7 +522,9 @@  static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 			if (err)
 				goto reset_unlock;
 		}
-		arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
+		err = arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
+		if (err)
+			goto reset_unlock;
 		/* Let bpf_link handle registration & unregistration.
 		 *
 		 * Pair with smp_load_acquire() during lookup_elem().
@@ -531,7 +533,10 @@  static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 		goto unlock;
 	}
 
-	arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
+	err = arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
+	if (err)
+		goto reset_unlock;
+
 	err = st_ops->reg(kdata);
 	if (likely(!err)) {
 		/* This refcnt increment on the map here after
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ea6843be2616..23ce17da3bf7 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -898,23 +898,30 @@  static LIST_HEAD(pack_list);
 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
 {
 	struct bpf_prog_pack *pack;
+	int err;
 
 	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
 		       GFP_KERNEL);
 	if (!pack)
 		return NULL;
 	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
-	if (!pack->ptr) {
-		kfree(pack);
-		return NULL;
-	}
+	if (!pack->ptr)
+		goto out;
 	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
 	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
 	list_add_tail(&pack->list, &pack_list);
 
 	set_vm_flush_reset_perms(pack->ptr);
-	set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
+	err = set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
+	if (err)
+		goto out_free;
 	return pack;
+
+out_free:
+	bpf_jit_free_exec(pack->ptr);
+out:
+	kfree(pack);
+	return NULL;
 }
 
 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
@@ -929,9 +936,15 @@  void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
 		size = round_up(size, PAGE_SIZE);
 		ptr = bpf_jit_alloc_exec(size);
 		if (ptr) {
+			int err;
+
 			bpf_fill_ill_insns(ptr, size);
 			set_vm_flush_reset_perms(ptr);
-			set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
+			err = set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
+			if (err) {
+				bpf_jit_free_exec(ptr);
+				ptr = NULL;
+			}
 		}
 		goto out;
 	}
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index d382f5ebe06c..6e64ac9083b6 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -456,7 +456,9 @@  static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
 	if (err < 0)
 		goto out_free;
 
-	arch_protect_bpf_trampoline(im->image, im->size);
+	err = arch_protect_bpf_trampoline(im->image, im->size);
+	if (err)
+		goto out_free;
 
 	WARN_ON(tr->cur_image && total == 0);
 	if (tr->cur_image)
@@ -1072,17 +1074,21 @@  void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
 	bpf_jit_free_exec(image);
 }
 
-void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
+int __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
 {
 	WARN_ON_ONCE(size > PAGE_SIZE);
-	set_memory_rox((long)image, 1);
+	return set_memory_rox((long)image, 1);
 }
 
-void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
 {
+	int err;
 	WARN_ON_ONCE(size > PAGE_SIZE);
-	set_memory_nx((long)image, 1);
-	set_memory_rw((long)image, 1);
+
+	err = set_memory_nx((long)image, 1);
+	if (err)
+		return err;
+	return set_memory_rw((long)image, 1);
 }
 
 int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 8906f7bdf4a9..6d49a00fba4d 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -129,7 +129,9 @@  int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
 	if (err < 0)
 		goto out;
 
-	arch_protect_bpf_trampoline(image, PAGE_SIZE);
+	err = arch_protect_bpf_trampoline(image, PAGE_SIZE);
+	if (err)
+		goto out;
 	prog_ret = dummy_ops_call_op(image, args);
 
 	err = dummy_ops_copy_args(args);