@@ -250,6 +250,8 @@ void __init kasan_init(void)
memset(kasan_zero_page, KASAN_SHADOW_INIT, PAGE_SIZE);
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+ khwasan_init();
+
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
pr_info("KernelAddressSanitizer initialized\n");
@@ -155,6 +155,35 @@ static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
#define KASAN_SHADOW_INIT 0xFF
+void khwasan_init(void);
+
+void *khwasan_reset_tag(const void *addr);
+
+void *khwasan_preset_slub_tag(struct kmem_cache *cache, const void *addr);
+void *khwasan_preset_slab_tag(struct kmem_cache *cache, unsigned int idx,
+ const void *addr);
+
+#else /* CONFIG_KASAN_HW */
+
+static inline void khwasan_init(void) { }
+
+static inline void *khwasan_reset_tag(const void *addr)
+{
+ return (void *)addr;
+}
+
+static inline void *khwasan_preset_slub_tag(struct kmem_cache *cache,
+ const void *addr)
+{
+ return (void *)addr;
+}
+
+static inline void *khwasan_preset_slab_tag(struct kmem_cache *cache,
+ unsigned int idx, const void *addr)
+{
+ return (void *)addr;
+}
+
#endif /* CONFIG_KASAN_HW */
#endif /* LINUX_KASAN_H */
@@ -8,6 +8,10 @@
#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
+#define KHWASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
+#define KHWASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
+#define KHWASAN_TAG_MAX 0xFD /* maximum value for random tags */
+
#define KASAN_FREE_PAGE 0xFF /* page was freed */
#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
@@ -126,6 +130,57 @@ static inline void quarantine_reduce(void) { }
static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
#endif
+#ifdef CONFIG_KASAN_HW
+
+#define KHWASAN_TAG_SHIFT 56
+#define KHWASAN_TAG_MASK (0xFFUL << KHWASAN_TAG_SHIFT)
+
+u8 random_tag(void);
+
+static inline void *set_tag(const void *addr, u8 tag)
+{
+ u64 a = (u64)addr;
+
+ a &= ~KHWASAN_TAG_MASK;
+ a |= ((u64)tag << KHWASAN_TAG_SHIFT);
+
+ return (void *)a;
+}
+
+static inline u8 get_tag(const void *addr)
+{
+ return (u8)((u64)addr >> KHWASAN_TAG_SHIFT);
+}
+
+static inline void *reset_tag(const void *addr)
+{
+ return set_tag(addr, KHWASAN_TAG_KERNEL);
+}
+
+#else /* CONFIG_KASAN_HW */
+
+static inline u8 random_tag(void)
+{
+ return 0;
+}
+
+static inline void *set_tag(const void *addr, u8 tag)
+{
+ return (void *)addr;
+}
+
+static inline u8 get_tag(const void *addr)
+{
+ return 0;
+}
+
+static inline void *reset_tag(const void *addr)
+{
+ return (void *)addr;
+}
+
+#endif /* CONFIG_KASAN_HW */
+
/*
* Exported functions for interfaces called from assembly or from generated
* code. Declarations here to avoid warning about missing declarations.
@@ -38,6 +38,71 @@
#include "kasan.h"
#include "../slab.h"
+static DEFINE_PER_CPU(u32, prng_state);
+
+void khwasan_init(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(prng_state, cpu) = get_random_u32();
+}
+
+/*
+ * If a preemption happens between this_cpu_read and this_cpu_write, the only
+ * side effect is that we'll assign the same tag to a few objects allocated
+ * in different contexts. Since KHWASAN is meant to be used as a probabilistic
+ * bug-detection debug feature, this doesn't have a significant negative impact.
+ *
+ * Ideally the tags would use strong randomness to prevent any attempts to
+ * predict them during an exploit. But strong randomness is expensive, and
+ * we made an intentional trade-off to use a PRNG. This non-atomic RMW
+ * sequence in fact has a positive effect, since interrupts that randomly
+ * skew the PRNG at unpredictable points only do good.
+ */
+u8 random_tag(void)
+{
+ u32 state = this_cpu_read(prng_state);
+
+ state = 1664525 * state + 1013904223;
+ this_cpu_write(prng_state, state);
+
+ return (u8)(state % (KHWASAN_TAG_MAX + 1));
+}
+
+void *khwasan_reset_tag(const void *addr)
+{
+ return reset_tag(addr);
+}
+
+void *khwasan_preset_slub_tag(struct kmem_cache *cache, const void *addr)
+{
+ /*
+ * Since it's desirable to call object constructors only once during
+ * slab allocation, we preassign tags to all such objects.
+ * Also preassign tags for SLAB_TYPESAFE_BY_RCU slabs to avoid
+ * use-after-free reports.
+ */
+ if (cache->ctor || cache->flags & SLAB_TYPESAFE_BY_RCU)
+ return set_tag(addr, random_tag());
+ return (void *)addr;
+}
+
+void *khwasan_preset_slab_tag(struct kmem_cache *cache, unsigned int idx,
+ const void *addr)
+{
+ /*
+ * See comment in khwasan_preset_slub_tag.
+ * For SLAB allocator we can't preassign tags randomly since the
+ * freelist is stored as an array of indexes instead of a linked
+ * list. Assign tags based on object indexes, so that objects that
+ * are next to each other get different tags.
+ */
+ if (cache->ctor || cache->flags & SLAB_TYPESAFE_BY_RCU)
+ return set_tag(addr, (u8)idx);
+ return (void *)addr;
+}
+
void check_memory_region(unsigned long addr, size_t size, bool write,
unsigned long ret_ip)
{
This commit adds a few helper functions that are meant to be used to work with tags embedded in the top byte of kernel pointers: to set, to get or to reset (set to 0xff) the top byte. Signed-off-by: Andrey Konovalov <andreyknvl@google.com> --- arch/arm64/mm/kasan_init.c | 2 ++ include/linux/kasan.h | 29 +++++++++++++++++ mm/kasan/kasan.h | 55 ++++++++++++++++++++++++++++++++ mm/kasan/khwasan.c | 65 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 151 insertions(+)