[PATCHv3,2/2] arm64: Add CONFIG_DEBUG_SET_MODULE_RONX support

Message ID 1407886667-16919-3-git-send-email-lauraa@codeaurora.org (mailing list archive)
State New, archived

Commit Message

Laura Abbott Aug. 12, 2014, 11:37 p.m. UTC
In a similar fashion to other architectures, add the infrastructure
and Kconfig to enable DEBUG_SET_MODULE_RONX support. When
enabled, module ranges will be marked read-only/no-execute as
appropriate.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
---
 arch/arm64/Kconfig.debug            |  11 ++++
 arch/arm64/include/asm/cacheflush.h |   4 ++
 arch/arm64/mm/Makefile              |   2 +-
 arch/arm64/mm/pageattr.c            | 118 ++++++++++++++++++++++++++++++++++++
 4 files changed, 134 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/mm/pageattr.c

Comments

Will Deacon Aug. 19, 2014, 12:11 p.m. UTC | #1
Hi Laura,

On Wed, Aug 13, 2014 at 12:37:47AM +0100, Laura Abbott wrote:
> In a similar fashion to other architectures, add the infrastructure
> and Kconfig to enable DEBUG_SET_MODULE_RONX support. When
> enabled, module ranges will be marked read-only/no-execute as
> appropriate.

[...]

> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> new file mode 100644
> index 0000000..13573da
> --- /dev/null
> +++ b/arch/arm64/mm/pageattr.c
> @@ -0,0 +1,118 @@
> +/*
> + * Copyright (c) 2014, The Linux Foundation. All rights reserved.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 and
> + * only version 2 as published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + */
> +#include <linux/kernel.h>
> +#include <linux/mm.h>
> +#include <linux/module.h>
> +#include <linux/sched.h>
> +
> +#include <asm/pgtable.h>
> +#include <asm/tlbflush.h>
> +
> +static int __change_memory(pte_t *ptep, pgtable_t token, unsigned long addr,
> +			pgprot_t prot, bool set)
> +{
> +	pte_t pte;
> +
> +	if (set)
> +		pte = set_pte_bit(*ptep, prot);
> +	else
> +		pte = clear_pte_bit(*ptep, prot);
> +	set_pte(ptep, pte);
> +	return 0;
> +}

Why do we need to propagate the token this far?

> +static int set_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
> +			void *data)
> +{
> +	pgprot_t prot = (pgprot_t)data;
> +
> +	return __change_memory(ptep, token, addr, prot, true);
> +}
> +
> +static int clear_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
> +			void *data)
> +{
> +	pgprot_t prot = (pgprot_t)data;
> +
> +	return __change_memory(ptep, token, addr, prot, false);
> +}
> +
> +static int change_memory_common(unsigned long addr, int numpages,
> +				pgprot_t prot, bool set)
> +{
> +	unsigned long start = addr;
> +	unsigned long size = PAGE_SIZE*numpages;
> +	unsigned long end = start + size;
> +	int ret;
> +
> +	if (!is_module_address(start) || !is_module_address(end))
> +		return -EINVAL;
> +
> +	if (set)
> +		ret = apply_to_page_range(&init_mm, start, size,
> +					set_page_range, (void *)prot);
> +	else
> +		ret = apply_to_page_range(&init_mm, start, size,
> +					clear_page_range, (void *)prot);

What if addr isn't page-aligned?

> +	flush_tlb_kernel_range(start, end);
> +	return ret;
> +}
> +
> +static int change_memory_set_bit(unsigned long addr, int numpages,
> +					pgprot_t prot)
> +{
> +	return change_memory_common(addr, numpages, prot, true);
> +}
> +
> +static int change_memory_clear_bit(unsigned long addr, int numpages,
> +					pgprot_t prot)
> +{
> +	return change_memory_common(addr, numpages, prot, false);
> +}
> +
> +int set_memory_ro(unsigned long addr, int numpages)
> +{
> +	int ret;
> +
> +	ret = change_memory_clear_bit(addr, numpages, __pgprot(PTE_WRITE));
> +	if (ret)
> +		return ret;

I'm slightly uncomfortable with this. Can other cores make the pages
writable again here before we've set R/O? I think this would be neater if,
instead of explicit set/clear operations, we had a single function that
takes a set mask and a clear mask and writes the page table once (we also
avoid the extra TLBI that way)

> +	return change_memory_set_bit(addr, numpages, __pgprot(PTE_RDONLY));
> +}
> +EXPORT_SYMBOL_GPL(set_memory_ro);
> +
> +int set_memory_rw(unsigned long addr, int numpages)
> +{
> +	int ret;
> +
> +	ret = change_memory_set_bit(addr, numpages, __pgprot(PTE_WRITE));
> +	if (ret)
> +		return ret;
> +
> +	return change_memory_clear_bit(addr, numpages, __pgprot(PTE_RDONLY));
> +}
> +EXPORT_SYMBOL_GPL(set_memory_rw);
> +
> +int set_memory_nx(unsigned long addr, int numpages)
> +{
> +	return change_memory_set_bit(addr, numpages, __pgprot(PTE_PXN));
> +}
> +EXPORT_SYMBOL_GPL(set_memory_nx);
> +
> +int set_memory_x(unsigned long addr, int numpages)
> +{
> +	return change_memory_clear_bit(addr, numpages, __pgprot(PTE_PXN));
> +}

What about UXN? (I don't have the ARM ARM to hand, but we may want to set
both)

Will
Laura Abbott Aug. 19, 2014, 5:21 p.m. UTC | #2
On 8/19/2014 5:11 AM, Will Deacon wrote:
> Hi Laura,
> 
> On Wed, Aug 13, 2014 at 12:37:47AM +0100, Laura Abbott wrote:
>> In a similar fashion to other architectures, add the infrastructure
>> and Kconfig to enable DEBUG_SET_MODULE_RONX support. When
>> enabled, module ranges will be marked read-only/no-execute as
>> appropriate.
> 
> [...]
> 
>> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
>> new file mode 100644
>> index 0000000..13573da
>> --- /dev/null
>> +++ b/arch/arm64/mm/pageattr.c
>> @@ -0,0 +1,118 @@
>> +/*
>> + * Copyright (c) 2014, The Linux Foundation. All rights reserved.
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 and
>> + * only version 2 as published by the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + * GNU General Public License for more details.
>> + */
>> +#include <linux/kernel.h>
>> +#include <linux/mm.h>
>> +#include <linux/module.h>
>> +#include <linux/sched.h>
>> +
>> +#include <asm/pgtable.h>
>> +#include <asm/tlbflush.h>
>> +
>> +static int __change_memory(pte_t *ptep, pgtable_t token, unsigned long addr,
>> +			pgprot_t prot, bool set)
>> +{
>> +	pte_t pte;
>> +
>> +	if (set)
>> +		pte = set_pte_bit(*ptep, prot);
>> +	else
>> +		pte = clear_pte_bit(*ptep, prot);
>> +	set_pte(ptep, pte);
>> +	return 0;
>> +}
> 
> Why do we need to propagate the token this far?
> 

Not quite sure I understand the comment here.

>> +static int set_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
>> +			void *data)
>> +{
>> +	pgprot_t prot = (pgprot_t)data;
>> +
>> +	return __change_memory(ptep, token, addr, prot, true);
>> +}
>> +
>> +static int clear_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
>> +			void *data)
>> +{
>> +	pgprot_t prot = (pgprot_t)data;
>> +
>> +	return __change_memory(ptep, token, addr, prot, false);
>> +}
>> +
>> +static int change_memory_common(unsigned long addr, int numpages,
>> +				pgprot_t prot, bool set)
>> +{
>> +	unsigned long start = addr;
>> +	unsigned long size = PAGE_SIZE*numpages;
>> +	unsigned long end = start + size;
>> +	int ret;
>> +
>> +	if (!is_module_address(start) || !is_module_address(end))
>> +		return -EINVAL;
>> +
>> +	if (set)
>> +		ret = apply_to_page_range(&init_mm, start, size,
>> +					set_page_range, (void *)prot);
>> +	else
>> +		ret = apply_to_page_range(&init_mm, start, size,
>> +					clear_page_range, (void *)prot);
> 
> What if addr isn't page-aligned?
> 

I think it would be harmless. x86 rounds down and does a WARN_ONCE
though so I'll put something similar here.
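
For reference, a minimal sketch of that kind of guard at the top of
change_memory_common() (the exact form is an assumption, loosely modelled
on x86, not the code actually posted):

	/*
	 * Illustrative alignment guard: warn once on a misaligned address
	 * and round it down to a page boundary before applying the change.
	 */
	if (WARN_ON_ONCE(addr & ~PAGE_MASK))
		addr &= PAGE_MASK;

	start = addr;
	size = PAGE_SIZE * numpages;
	end = start + size;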

>> +	flush_tlb_kernel_range(start, end);
>> +	return ret;
>> +}
>> +
>> +static int change_memory_set_bit(unsigned long addr, int numpages,
>> +					pgprot_t prot)
>> +{
>> +	return change_memory_common(addr, numpages, prot, true);
>> +}
>> +
>> +static int change_memory_clear_bit(unsigned long addr, int numpages,
>> +					pgprot_t prot)
>> +{
>> +	return change_memory_common(addr, numpages, prot, false);
>> +}
>> +
>> +int set_memory_ro(unsigned long addr, int numpages)
>> +{
>> +	int ret;
>> +
>> +	ret = change_memory_clear_bit(addr, numpages, __pgprot(PTE_WRITE));
>> +	if (ret)
>> +		return ret;
> 
> I'm slightly uncomfortable with this. Can other cores make the pages
> writable again here before we've set R/O? I think this would be neater if,
> instead of explicit set/clear operations, we had a single function that
> takes a set mask and a clear mask and writes the page table once (we also
> avoid the extra TLBI that way)
> 

Yes, that does seem cleaner.
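
A minimal sketch of the combined set/clear-mask approach Will describes
(the names are illustrative, not taken from the posted patch):

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	/* Apply both masks in one pass, then write the PTE exactly once. */
	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

set_memory_ro() could then pass set_mask = PTE_RDONLY and clear_mask =
PTE_WRITE through a single apply_to_page_range() walk, so the PTE is never
left transiently writable and only one flush_tlb_kernel_range() is needed.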

>> +	return change_memory_set_bit(addr, numpages, __pgprot(PTE_RDONLY));
>> +}
>> +EXPORT_SYMBOL_GPL(set_memory_ro);
>> +
>> +int set_memory_rw(unsigned long addr, int numpages)
>> +{
>> +	int ret;
>> +
>> +	ret = change_memory_set_bit(addr, numpages, __pgprot(PTE_WRITE));
>> +	if (ret)
>> +		return ret;
>> +
>> +	return change_memory_clear_bit(addr, numpages, __pgprot(PTE_RDONLY));
>> +}
>> +EXPORT_SYMBOL_GPL(set_memory_rw);
>> +
>> +int set_memory_nx(unsigned long addr, int numpages)
>> +{
>> +	return change_memory_set_bit(addr, numpages, __pgprot(PTE_PXN));
>> +}
>> +EXPORT_SYMBOL_GPL(set_memory_nx);
>> +
>> +int set_memory_x(unsigned long addr, int numpages)
>> +{
>> +	return change_memory_clear_bit(addr, numpages, __pgprot(PTE_PXN));
>> +}
> 
> What about UXN? (I don't have the ARM ARM to hand, but we may want to set
> both)
> 

Both PAGE_KERNEL and PAGE_KERNEL_EXEC already set PTE_UXN. It seems
redundant to add it again given this is currently restricted to module
addresses. It's cheap to add it though for paranoia's sake.
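
For context, the arm64 kernel page protections of that era already carry
PTE_UXN; approximately (treat the exact macro plumbing as a paraphrase, it
varies between kernel versions):

#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)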

> Will
>  

Thanks,
Laura
Will Deacon Aug. 22, 2014, 3:45 p.m. UTC | #3
Hi Laura,

On Tue, Aug 19, 2014 at 06:21:27PM +0100, Laura Abbott wrote:
> On 8/19/2014 5:11 AM, Will Deacon wrote:
> > On Wed, Aug 13, 2014 at 12:37:47AM +0100, Laura Abbott wrote:
> >> +static int __change_memory(pte_t *ptep, pgtable_t token, unsigned long addr,
> >> +			pgprot_t prot, bool set)
> >> +{
> >> +	pte_t pte;
> >> +
> >> +	if (set)
> >> +		pte = set_pte_bit(*ptep, prot);
> >> +	else
> >> +		pte = clear_pte_bit(*ptep, prot);
> >> +	set_pte(ptep, pte);
> >> +	return 0;
> >> +}
> > 
> > Why do we need to propagate the token this far?
> > 
> 
> Not quite sure I understand the comment here.

Sorry, I was reviewing patches on a plane and ended up being a bit too
terse. I basically don't understand why we have to pass the `token'
parameter to __change_memory as it appears to be unused.
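
For context, `token' comes straight from the callback type that
apply_to_page_range() expects (include/linux/mm.h in kernels of this era),
so every callback has to accept it even if it never uses it:

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);

extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);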

> > What about UXN? (I don't have the ARM ARM to hand, but we may want to set
> > both)
> > 
> 
> Both PAGE_KERNEL and PAGE_KERNEL_EXEC already set PTE_UXN. It seems
> redundant to add it again given this is currently restricted to module
> addresses. It's cheap to add it though for paranoia's sake.

I missed that, so I think it's fine to leave this part as-is.

Will

Patch

diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 4ee8e90..0a12933 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -43,4 +43,15 @@  config ARM64_RANDOMIZE_TEXT_OFFSET
 	  of TEXT_OFFSET and platforms must not require a specific
 	  value.
 
+config DEBUG_SET_MODULE_RONX
+        bool "Set loadable kernel module data as NX and text as RO"
+        depends on MODULES
+        help
+          This option helps catch unintended modifications to loadable
+          kernel module's text and read-only data. It also prevents execution
+          of module data. Such protection may interfere with run-time code
+          patching and dynamic kernel tracing - and they might also protect
+          against certain classes of kernel exploits.
+          If in doubt, say "N".
+
 endmenu
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index f2defe1..689b637 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -148,4 +148,8 @@  static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
 
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
 #endif
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 3ecb56c..c56179e 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -1,5 +1,5 @@ 
 obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   cache.o copypage.o flush.o \
 				   ioremap.o mmap.o pgd.o mmu.o \
-				   context.o proc.o
+				   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
new file mode 100644
index 0000000..13573da
--- /dev/null
+++ b/arch/arm64/mm/pageattr.c
@@ -0,0 +1,118 @@ 
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+static int __change_memory(pte_t *ptep, pgtable_t token, unsigned long addr,
+			pgprot_t prot, bool set)
+{
+	pte_t pte;
+
+	if (set)
+		pte = set_pte_bit(*ptep, prot);
+	else
+		pte = clear_pte_bit(*ptep, prot);
+	set_pte(ptep, pte);
+	return 0;
+}
+
+static int set_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+			void *data)
+{
+	pgprot_t prot = (pgprot_t)data;
+
+	return __change_memory(ptep, token, addr, prot, true);
+}
+
+static int clear_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+			void *data)
+{
+	pgprot_t prot = (pgprot_t)data;
+
+	return __change_memory(ptep, token, addr, prot, false);
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+				pgprot_t prot, bool set)
+{
+	unsigned long start = addr;
+	unsigned long size = PAGE_SIZE*numpages;
+	unsigned long end = start + size;
+	int ret;
+
+	if (!is_module_address(start) || !is_module_address(end))
+		return -EINVAL;
+
+	if (set)
+		ret = apply_to_page_range(&init_mm, start, size,
+					set_page_range, (void *)prot);
+	else
+		ret = apply_to_page_range(&init_mm, start, size,
+					clear_page_range, (void *)prot);
+
+	flush_tlb_kernel_range(start, end);
+	return ret;
+}
+
+static int change_memory_set_bit(unsigned long addr, int numpages,
+					pgprot_t prot)
+{
+	return change_memory_common(addr, numpages, prot, true);
+}
+
+static int change_memory_clear_bit(unsigned long addr, int numpages,
+					pgprot_t prot)
+{
+	return change_memory_common(addr, numpages, prot, false);
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+	int ret;
+
+	ret = change_memory_clear_bit(addr, numpages, __pgprot(PTE_WRITE));
+	if (ret)
+		return ret;
+
+	return change_memory_set_bit(addr, numpages, __pgprot(PTE_RDONLY));
+}
+EXPORT_SYMBOL_GPL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+	int ret;
+
+	ret = change_memory_set_bit(addr, numpages, __pgprot(PTE_WRITE));
+	if (ret)
+		return ret;
+
+	return change_memory_clear_bit(addr, numpages, __pgprot(PTE_RDONLY));
+}
+EXPORT_SYMBOL_GPL(set_memory_rw);
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+	return change_memory_set_bit(addr, numpages, __pgprot(PTE_PXN));
+}
+EXPORT_SYMBOL_GPL(set_memory_nx);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+	return change_memory_clear_bit(addr, numpages, __pgprot(PTE_PXN));
+}
+EXPORT_SYMBOL_GPL(set_memory_x);
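
A hedged usage sketch (a hypothetical test module, not part of this patch)
showing how the exported interface would be exercised from module context,
where the is_module_address() checks pass:

/* Hypothetical self-test: flip one of this module's own pages RO and back. */
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static int __init ronx_demo_init(void)
{
	unsigned long page = (unsigned long)ronx_demo_init & PAGE_MASK;
	int ret;

	ret = set_memory_ro(page, 1);	/* text page becomes read-only */
	if (ret)
		return ret;
	return set_memory_rw(page, 1);	/* restore the original writable state */
}
module_init(ronx_demo_init);

MODULE_LICENSE("GPL");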