
[v9,03/44] kasan: group vmalloc code

Message ID 4d261d4cbd9842c51cb6f9b36cadc9054cabe86b.1605046192.git.andreyknvl@google.com (mailing list archive)
State New, archived
Series [v9,01/44] kasan: drop unnecessary GPL text from comment headers

Commit Message

Andrey Konovalov Nov. 10, 2020, 10:10 p.m. UTC
This is a preparatory commit for the upcoming addition of a new hardware
tag-based (MTE-based) KASAN mode.

Group all vmalloc-related function declarations in include/linux/kasan.h,
and their implementations in mm/kasan/common.c.

No functional changes.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Marco Elver <elver@google.com>
---
Change-Id: Ie20b6c689203cd6de4fd7f2c465ec081c00c5f15
---
 include/linux/kasan.h | 41 +++++++++++++----------
 mm/kasan/common.c     | 78 ++++++++++++++++++++++---------------------
 2 files changed, 63 insertions(+), 56 deletions(-)
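
For orientation, a condensed sketch of how the relevant guards in
include/linux/kasan.h read once this patch is applied (reconstructed from the
diff below, with the no-op stub bodies elided). Note that the module-shadow
declarations now sit under an explicit CONFIG_KASAN check, since they are no
longer nested inside the main #ifdef CONFIG_KASAN block; this is what lets the
duplicate stubs in the !CONFIG_KASAN section go away:

  #ifdef CONFIG_KASAN_VMALLOC
  int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
  void kasan_poison_vmalloc(const void *start, unsigned long size);
  void kasan_unpoison_vmalloc(const void *start, unsigned long size);
  void kasan_release_vmalloc(unsigned long start, unsigned long end,
  			   unsigned long free_region_start,
  			   unsigned long free_region_end);
  #else /* CONFIG_KASAN_VMALLOC */
  /* ... matching no-op static inline stubs ... */
  #endif /* CONFIG_KASAN_VMALLOC */

  #if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
  /* Module shadow special case, unnecessary with KASAN vmalloc. */
  int kasan_module_alloc(void *addr, size_t size);
  void kasan_free_shadow(const struct vm_struct *vm);
  #else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */
  static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
  static inline void kasan_free_shadow(const struct vm_struct *vm) {}
  #endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */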

Comments

Alexander Potapenko Nov. 11, 2020, 1:21 p.m. UTC | #1
On Tue, Nov 10, 2020 at 11:11 PM Andrey Konovalov <andreyknvl@google.com> wrote:
>
> This is a preparatory commit for the upcoming addition of a new hardware
> tag-based (MTE-based) KASAN mode.
>
> Group all vmalloc-related function declarations in include/linux/kasan.h,
> and their implementations in mm/kasan/common.c.
>
> No functional changes.
>
> Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
> Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
> Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
> [... remainder of quoted patch snipped; identical to the patch below ...]

Patch

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 30d343b4a40a..59538e795df4 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -75,19 +75,6 @@ struct kasan_cache {
 	int free_meta_offset;
 };
 
-/*
- * These functions provide a special case to support backing module
- * allocations with real shadow memory. With KASAN vmalloc, the special
- * case is unnecessary, as the work is handled in the generic case.
- */
-#ifndef CONFIG_KASAN_VMALLOC
-int kasan_module_alloc(void *addr, size_t size);
-void kasan_free_shadow(const struct vm_struct *vm);
-#else
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-#endif
-
 int kasan_add_zero_shadow(void *start, unsigned long size);
 void kasan_remove_zero_shadow(void *start, unsigned long size);
 
@@ -156,9 +143,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 	return false;
 }
 
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
-
 static inline int kasan_add_zero_shadow(void *start, unsigned long size)
 {
 	return 0;
@@ -211,13 +195,16 @@ static inline void *kasan_reset_tag(const void *addr)
 #endif /* CONFIG_KASAN_SW_TAGS */
 
 #ifdef CONFIG_KASAN_VMALLOC
+
 int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
 void kasan_poison_vmalloc(const void *start, unsigned long size);
 void kasan_unpoison_vmalloc(const void *start, unsigned long size);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end);
-#else
+
+#else /* CONFIG_KASAN_VMALLOC */
+
 static inline int kasan_populate_vmalloc(unsigned long start,
 					unsigned long size)
 {
@@ -232,7 +219,25 @@ static inline void kasan_release_vmalloc(unsigned long start,
 					 unsigned long end,
 					 unsigned long free_region_start,
 					 unsigned long free_region_end) {}
-#endif
+
+#endif /* CONFIG_KASAN_VMALLOC */
+
+#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
+
+/*
+ * These functions provide a special case to support backing module
+ * allocations with real shadow memory. With KASAN vmalloc, the special
+ * case is unnecessary, as the work is handled in the generic case.
+ */
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_free_shadow(const struct vm_struct *vm);
+
+#else /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */
+
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+
+#endif /* CONFIG_KASAN && !CONFIG_KASAN_VMALLOC */
 
 #ifdef CONFIG_KASAN_INLINE
 void kasan_non_canonical_hook(unsigned long addr);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 33d863f55db1..89e5ef9417a7 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -536,44 +536,6 @@ void kasan_kfree_large(void *ptr, unsigned long ip)
 	/* The object will be poisoned by page_alloc. */
 }
 
-#ifndef CONFIG_KASAN_VMALLOC
-int kasan_module_alloc(void *addr, size_t size)
-{
-	void *ret;
-	size_t scaled_size;
-	size_t shadow_size;
-	unsigned long shadow_start;
-
-	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
-	shadow_size = round_up(scaled_size, PAGE_SIZE);
-
-	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
-		return -EINVAL;
-
-	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
-			shadow_start + shadow_size,
-			GFP_KERNEL,
-			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
-			__builtin_return_address(0));
-
-	if (ret) {
-		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
-		find_vm_area(addr)->flags |= VM_KASAN;
-		kmemleak_ignore(ret);
-		return 0;
-	}
-
-	return -ENOMEM;
-}
-
-void kasan_free_shadow(const struct vm_struct *vm)
-{
-	if (vm->flags & VM_KASAN)
-		vfree(kasan_mem_to_shadow(vm->addr));
-}
-#endif
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 static bool shadow_mapped(unsigned long addr)
 {
@@ -685,6 +647,7 @@ core_initcall(kasan_memhotplug_init);
 #endif
 
 #ifdef CONFIG_KASAN_VMALLOC
+
 static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 				      void *unused)
 {
@@ -923,4 +886,43 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 				       (unsigned long)shadow_end);
 	}
 }
+
+#else /* CONFIG_KASAN_VMALLOC */
+
+int kasan_module_alloc(void *addr, size_t size)
+{
+	void *ret;
+	size_t scaled_size;
+	size_t shadow_size;
+	unsigned long shadow_start;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
+	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+	shadow_size = round_up(scaled_size, PAGE_SIZE);
+
+	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
+		return -EINVAL;
+
+	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
+			shadow_start + shadow_size,
+			GFP_KERNEL,
+			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
+			__builtin_return_address(0));
+
+	if (ret) {
+		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
+		find_vm_area(addr)->flags |= VM_KASAN;
+		kmemleak_ignore(ret);
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+void kasan_free_shadow(const struct vm_struct *vm)
+{
+	if (vm->flags & VM_KASAN)
+		vfree(kasan_mem_to_shadow(vm->addr));
+}
+
 #endif
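
As an aside on the moved code rather than on the patch itself: the shadow
sizing in kasan_module_alloc() scales the module allocation down by the KASAN
granule size. A worked example, assuming generic KASAN where
KASAN_SHADOW_SCALE_SHIFT is 3 and KASAN_SHADOW_MASK is 7 (one shadow byte
covers eight bytes of memory):

  size        = 1 MB module allocation        (0x100000)
  scaled_size = (0x100000 + 7) >> 3           = 0x20000  (128 KB)
  shadow_size = round_up(0x20000, PAGE_SIZE)  = 0x20000 with 4 KB pages

The shadow is then mapped with __vmalloc_node_range() directly at
kasan_mem_to_shadow(addr), using VM_NO_GUARD so it stays contiguous with
neighbouring shadow regions, and initialized to KASAN_SHADOW_INIT; the
module's own vm area is flagged VM_KASAN so that kasan_free_shadow() knows to
vfree() the corresponding shadow when the module goes away.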