@@ -6,6 +6,7 @@
#define __ASM_CACHE_H
#include <asm/cputype.h>
+#include <asm/mte-def.h>
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
@@ -49,16 +50,22 @@
*/
#define ARCH_DMA_MINALIGN (128)
+#ifndef __ASSEMBLY__
+
+#include <linux/bitops.h>
+#include <linux/kasan-enabled.h>
+
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
#elif defined(CONFIG_KASAN_HW_TAGS)
-#define ARCH_SLAB_MINALIGN MTE_GRANULE_SIZE
+static inline size_t arch_slab_minalign(void)
+{
+ return kasan_hw_tags_enabled() ? MTE_GRANULE_SIZE :
+ __alignof__(unsigned long long);
+}
+#define arch_slab_minalign() arch_slab_minalign()
#endif
-#ifndef __ASSEMBLY__
-
-#include <linux/bitops.h>
-
#define ICACHEF_ALIASING 0
#define ICACHEF_VPIPT 1
extern unsigned long __icache_flags;
@@ -209,6 +209,18 @@ void kmem_dump_obj(void *object);
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
+/*
+ * Arches can define this function if they want to decide the minimum slab
+ * alignment at runtime. The value returned by the function must be a power
+ * of two and >= ARCH_SLAB_MINALIGN.
+ */
+#ifndef arch_slab_minalign
+static inline size_t arch_slab_minalign(void)
+{
+ return ARCH_SLAB_MINALIGN;
+}
+#endif
+
/*
* kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
* pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
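
Note: the #ifndef fallback above is only compiled in when an architecture has not taken over; an overriding arch (arm64 in the first hunk) defines both the inline function and a macro of the same name, so the generic definition drops out. A minimal standalone sketch of that override idiom follows -- the names and the constant are hypothetical, not part of this patch:

#include <stddef.h>
#include <stdio.h>

/* "Arch" side: provide a runtime value and announce it with a macro of the
 * same name, mirroring the arm64 hunk (names here are illustrative). */
static inline size_t arch_min_align(void)
{
	return 16;	/* stand-in for a value decided at runtime */
}
#define arch_min_align() arch_min_align()

/* "Generic" side: the fallback is skipped because the macro is defined. */
#ifndef arch_min_align
static inline size_t arch_min_align(void)
{
	return sizeof(unsigned long long);
}
#endif

int main(void)
{
	printf("minimum slab alignment: %zu\n", arch_min_align());
	return 0;
}
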
@@ -3009,10 +3009,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON)
cachep->ctor(objp);
- if (ARCH_SLAB_MINALIGN &&
- ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
- pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
- objp, (int)ARCH_SLAB_MINALIGN);
+ if ((unsigned long)objp & (arch_slab_minalign() - 1)) {
+ pr_err("0x%px: not aligned to arch_slab_minalign()=%d\n", objp,
+ (int)arch_slab_minalign());
}
return objp;
}
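
Note: the debug check above relies on the documented guarantee that arch_slab_minalign() returns a power of two; for such a value, masking the address with (align - 1) isolates its offset within an aligned block, so a non-zero result means the pointer is misaligned. A standalone restatement of that test (the helper name is illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* True if ptr is aligned to align, which must be a power of two;
 * the masked value is exactly the misalignment in bytes. */
static bool is_aligned(const void *ptr, size_t align)
{
	return ((uintptr_t)ptr & (align - 1)) == 0;
}
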
@@ -154,8 +154,7 @@ static unsigned int calculate_alignment(slab_flags_t flags,
align = max(align, ralign);
}
- if (align < ARCH_SLAB_MINALIGN)
- align = ARCH_SLAB_MINALIGN;
+ align = max_t(size_t, align, arch_slab_minalign());
return ALIGN(align, sizeof(void *));
}
@@ -478,7 +478,7 @@ static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
unsigned int *m;
- int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, arch_slab_minalign());
void *ret;
gfp &= gfp_allowed_mask;
@@ -555,7 +555,7 @@ void kfree(const void *block)
sp = virt_to_folio(block);
if (folio_test_slab(sp)) {
- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, arch_slab_minalign());
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
} else {
@@ -584,7 +584,7 @@ size_t __ksize(const void *block)
if (unlikely(!folio_test_slab(folio)))
return folio_size(folio);
- align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ align = max_t(size_t, ARCH_KMALLOC_MINALIGN, arch_slab_minalign());
m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
}
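
Note: the three SLOB sites above must agree on a single header offset: __do_kmalloc_node() stores the request size minalign bytes below the pointer it returns, and kfree()/__ksize() step back by the same amount to find it, which is why each now calls arch_slab_minalign(). A simplified userspace model of that layout, with malloc() standing in for slob_alloc() (illustrative only, not kernel code):

#include <stdint.h>
#include <stdlib.h>

/* Toy model: a size header placed minalign bytes before the object.
 * Allocation, size query and free must all use the same minalign. */
static void *toy_kmalloc(size_t size, size_t minalign)
{
	unsigned int *m = malloc(size + minalign);

	if (!m)
		return NULL;
	*m = (unsigned int)size;	/* record the request size */
	return (char *)m + minalign;	/* object begins after the header */
}

static size_t toy_ksize(const void *obj, size_t minalign)
{
	return *(const unsigned int *)((const char *)obj - minalign);
}

static void toy_kfree(void *obj, size_t minalign)
{
	free((char *)obj - minalign);	/* step back to the header */
}
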