@@ -484,6 +484,13 @@ config HAVE_IRQ_EXIT_ON_IRQ_STACK
This spares a stack switch and improves cache usage on softirq
processing.
+config ARCH_HAS_ELF_RANDOMIZE
+ bool
+ help
+ An architecture supports choosing randomized locations for
+ stack, mmap, brk, and ET_DYN. Defined functions:
+ - arch_mmap_rnd(), which must respect (current->flags & PF_RANDOMIZE)
+
#
# ABI hall of shame
#
@@ -3,6 +3,7 @@ config ARM
default y
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -169,7 +169,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
unsigned long rnd = 0UL;
@@ -183,7 +183,7 @@ static unsigned long mmap_rnd(void)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
- unsigned long random_factor = mmap_rnd();
+ unsigned long random_factor = arch_mmap_rnd();
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -2,6 +2,7 @@ config ARM64
def_bool y
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -47,7 +47,7 @@ static int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
unsigned long rnd = 0;
@@ -66,7 +66,7 @@ static unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd());
+ return PAGE_ALIGN(STACK_TOP - gap - arch_mmap_rnd());
}
/*
@@ -24,6 +24,7 @@ config MIPS
select HAVE_DEBUG_KMEMLEAK
select HAVE_SYSCALL_TRACEPOINTS
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select ARCH_HAS_ELF_RANDOMIZE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
@@ -164,9 +164,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
}
}
-static inline unsigned long brk_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
- unsigned long rnd = get_random_int();
+ unsigned long rnd = 0;
+
+ if (current->flags & PF_RANDOMIZE)
+ rnd = get_random_int();
rnd = rnd << PAGE_SHIFT;
/* 8MB for 32bit, 256MB for 64bit */
@@ -183,7 +186,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
unsigned long base = mm->brk;
unsigned long ret;
- ret = PAGE_ALIGN(base + brk_rnd());
+ ret = PAGE_ALIGN(base + arch_mmap_rnd());
if (ret < mm->brk)
return mm->brk;
@@ -89,6 +89,7 @@ config PPC
select ARCH_MIGHT_HAVE_PC_SERIO
select BINFMT_ELF
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select ARCH_HAS_ELF_RANDOMIZE
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
@@ -53,7 +53,7 @@ static inline int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
unsigned long rnd = 0;
@@ -76,7 +76,7 @@ static inline unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+ return PAGE_ALIGN(TASK_SIZE - gap - arch_mmap_rnd());
}
/*
@@ -65,6 +65,7 @@ config S390
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SG_CHAIN
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -60,7 +60,7 @@ static inline int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
if (!(current->flags & PF_RANDOMIZE))
return 0;
@@ -72,7 +72,7 @@ static unsigned long mmap_rnd(void)
static unsigned long mmap_base_legacy(void)
{
- return TASK_UNMAPPED_BASE + mmap_rnd();
+ return TASK_UNMAPPED_BASE + arch_mmap_rnd();
}
static inline unsigned long mmap_base(void)
@@ -84,7 +84,7 @@ static inline unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
gap &= PAGE_MASK;
- return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
+ return STACK_TOP - stack_maxrandom_size() - arch_mmap_rnd() - gap;
}
unsigned long
@@ -187,7 +187,7 @@ unsigned long randomize_et_dyn(void)
if (!is_32bit_task())
/* Align to 4GB */
base &= ~((1UL << 32) - 1);
- return base + mmap_rnd();
+ return base + arch_mmap_rnd();
}
#ifndef CONFIG_64BIT
@@ -88,6 +88,7 @@ config X86
select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
select HAVE_USER_RETURN_NOTIFIER
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
+ select ARCH_HAS_ELF_RANDOMIZE
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select SPARSE_IRQ
@@ -65,7 +65,7 @@ static int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
unsigned long rnd = 0;
@@ -91,7 +91,7 @@ static unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
+ return PAGE_ALIGN(TASK_SIZE - gap - arch_mmap_rnd());
}
/*
@@ -103,7 +103,7 @@ static unsigned long mmap_legacy_base(void)
if (mmap_is_ia32())
return TASK_UNMAPPED_BASE;
else
- return TASK_UNMAPPED_BASE + mmap_rnd();
+ return TASK_UNMAPPED_BASE + arch_mmap_rnd();
}
/*
@@ -31,6 +31,7 @@
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
+#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
new file mode 100644
@@ -0,0 +1,10 @@
+#ifndef _ELF_RANDOMIZE_H
+#define _ELF_RANDOMIZE_H
+
+#ifndef CONFIG_ARCH_HAS_ELF_RANDOMIZE
+static inline unsigned long arch_mmap_rnd(void) { return 0; }
+#else
+extern unsigned long arch_mmap_rnd(void);
+#endif
+
+#endif
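
(Note: the new header above follows the usual kernel stub pattern: when CONFIG_ARCH_HAS_ELF_RANDOMIZE is not selected, callers see a static inline that returns 0, so generic code can call arch_mmap_rnd() unconditionally. A minimal sketch of such a caller — pick_mmap_base() is a hypothetical helper used only for illustration, not part of this patch:

	#include <linux/elf-randomize.h>

	/*
	 * Hypothetical generic caller: on architectures without
	 * ARCH_HAS_ELF_RANDOMIZE the stub returns 0 and the addition is a
	 * no-op; architectures that select the symbol provide the real
	 * arch_mmap_rnd().
	 */
	static unsigned long pick_mmap_base(unsigned long base)
	{
		return base + arch_mmap_rnd();
	}
)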
When an architecture fully supports randomizing the ELF load location,
a per-arch mmap_rnd() function is used to find a randomized mmap base.
In preparation for randomizing the location of ET_DYN binaries
separately from mmap, this renames and exports these functions as
arch_mmap_rnd(). Additionally, this introduces
CONFIG_ARCH_HAS_ELF_RANDOMIZE for describing this feature on
architectures that support it (which is a superset of
ARCH_BINFMT_ELF_RANDOMIZE_PIE, since s390 already does this without
the ARCH_BINFMT_ELF_RANDOMIZE_PIE logic).

Signed-off-by: Kees Cook <keescook@chromium.org>
---
 arch/Kconfig                  |  7 +++++++
 arch/arm/Kconfig              |  1 +
 arch/arm/mm/mmap.c            |  4 ++--
 arch/arm64/Kconfig            |  1 +
 arch/arm64/mm/mmap.c          |  4 ++--
 arch/mips/Kconfig             |  1 +
 arch/mips/mm/mmap.c           |  9 ++++++---
 arch/powerpc/Kconfig          |  1 +
 arch/powerpc/mm/mmap.c        |  4 ++--
 arch/s390/Kconfig             |  1 +
 arch/s390/mm/mmap.c           |  8 ++++----
 arch/x86/Kconfig              |  1 +
 arch/x86/mm/mmap.c            |  6 +++---
 fs/binfmt_elf.c               |  1 +
 include/linux/elf-randomize.h | 10 ++++++++++
 15 files changed, 43 insertions(+), 16 deletions(-)
 create mode 100644 include/linux/elf-randomize.h
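
(Note: for an architecture that wants to select ARCH_HAS_ELF_RANDOMIZE later, a minimal sketch of the expected arch_mmap_rnd() shape, modeled on the ARM/MIPS hunks above — the 8-bit mask is only a placeholder, each architecture picks its own entropy range:

	#include <linux/mm.h>
	#include <linux/random.h>
	#include <linux/sched.h>

	unsigned long arch_mmap_rnd(void)
	{
		unsigned long rnd = 0;

		/*
		 * Contract from the Kconfig help text: no randomization
		 * unless the task has PF_RANDOMIZE set.
		 */
		if (current->flags & PF_RANDOMIZE)
			rnd = (unsigned long)get_random_int() & 0xff; /* placeholder mask */

		return rnd << PAGE_SHIFT;
	}
)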