@@ -3,6 +3,8 @@
#include <asm-generic/sections.h>
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)
extern char _exiprom[];
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_ARM_SECTIONS_H */
@@ -7,6 +7,8 @@
#ifndef _BLACKFIN_SECTIONS_H
#define _BLACKFIN_SECTIONS_H
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)
+
/* only used when MTD_UCLINUX */
extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
@@ -62,6 +64,8 @@ static inline int arch_is_kernel_data(unsigned long addr)
}
#define arch_is_kernel_data(addr) arch_is_kernel_data(addr)
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__) */
+
#include <asm-generic/sections.h>
#endif
@@ -6,9 +6,12 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <asm-generic/sections.h>
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)
+
#include <linux/elf.h>
#include <linux/uaccess.h>
-#include <asm-generic/sections.h>
extern char __phys_per_cpu_start[];
#ifdef CONFIG_SMP
@@ -37,6 +40,6 @@ static inline void *dereference_function_descriptor(void *ptr)
return ptr;
}
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_IA64_SECTIONS_H */
-
@@ -1,11 +1,12 @@
#ifndef _ASM_POWERPC_SECTIONS_H
#define _ASM_POWERPC_SECTIONS_H
-#ifdef __KERNEL__
-#include <linux/elf.h>
-#include <linux/uaccess.h>
#include <asm-generic/sections.h>
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)
+
+#include <linux/elf.h>
+#include <linux/uaccess.h>
#ifdef __powerpc64__
extern char __start_interrupts[];
@@ -75,7 +76,7 @@ static inline void *dereference_function_descriptor(void *ptr)
}
#endif /* PPC64_ELF_ABI_v1 */
-#endif
+#endif /* __powerpc64__ */
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__) */
-#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SECTIONS_H */
@@ -3,9 +3,11 @@
#include <asm-generic/sections.h>
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)
extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end;
extern char __start_eh_frame[], __stop_eh_frame[];
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__) */
#endif /* __ASM_SH_SECTIONS_H */
@@ -4,10 +4,14 @@
/* nothing to see, move along */
#include <asm-generic/sections.h>
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)
+
/* sparc entry point */
extern char _start[];
extern char __leon_1insn_patch[];
extern char __leon_1insn_patch_end[];
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__) */
+
#endif
@@ -19,6 +19,8 @@
#include <asm-generic/sections.h>
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)
+
/* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[];
@@ -44,4 +46,6 @@ static inline int arch_is_kernel_data(unsigned long addr)
addr < (unsigned long)_end;
}
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__) */
+
#endif /* _ASM_TILE_SECTIONS_H */
@@ -2,13 +2,34 @@
#define _ASM_X86_SECTIONS_H
#include <asm-generic/sections.h>
-#include <asm/uaccess.h>
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)
extern char __brk_base[], __brk_limit[];
+
+/*
+ * The exception table consists of triples of addresses relative to the
+ * exception table entry itself. The first address is of an instruction
+ * that is allowed to fault, the second is the target at which the program
+ * should continue. The third is a handler function to deal with the fault
+ * caused by the instruction in the first field.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ int insn, fixup, handler;
+};
+
extern struct exception_table_entry __stop___ex_table[];
#if defined(CONFIG_X86_64)
extern char __end_rodata_hpage_align[];
#endif
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__) */
+
#endif /* _ASM_X86_SECTIONS_H */
@@ -8,6 +8,7 @@
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h>
+#include <asm/sections.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
@@ -90,23 +91,6 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
#define access_ok(type, addr, size) \
likely(!__range_not_ok(addr, size, user_addr_max()))
-/*
- * The exception table consists of triples of addresses relative to the
- * exception table entry itself. The first address is of an instruction
- * that is allowed to fault, the second is the target at which the program
- * should continue. The third is a handler function to deal with the fault
- * caused by the instruction in the first field.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
- int insn, fixup, handler;
-};
-
#define ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex_entry_fixup(a, b, tmp, delta) \
@@ -1,6 +1,8 @@
#ifndef _ASM_GENERIC_SECTIONS_H_
#define _ASM_GENERIC_SECTIONS_H_
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__)
+
/* References to section boundaries */
#include <linux/compiler.h>
@@ -128,4 +130,6 @@ static inline bool init_section_intersects(void *virt, size_t size)
return memory_intersects(__init_begin, __init_end, virt, size);
}
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) && !defined(__ASSEMBLY__) */
+
#endif /* _ASM_GENERIC_SECTIONS_H_ */
We'll later add some generic helpers for use in the linker scripts and some generic asm code for all architectures. To do that we'll first need to guard these headers against inclusion from linker scripts and asm code. On x86, uaccess.h has a struct which is used as part of a section; move that to sections.h, as we otherwise have no need for uaccess.h in sections.h.

v3: new to this series, needed due to the collateral of the split of tables.h into 3 files: tables.h, ranges.h and sections.h. The need to guard sections.h arises because sections.h is already included and expanded considerably in certain architectures.

Signed-off-by: Luis R. Rodriguez <mcgrof@kernel.org>
---
 arch/arm/include/asm/sections.h      |  2 ++
 arch/blackfin/include/asm/sections.h |  4 ++++
 arch/ia64/include/asm/sections.h     |  7 +++++--
 arch/powerpc/include/asm/sections.h  | 11 ++++++-----
 arch/sh/include/asm/sections.h       |  2 ++
 arch/sparc/include/asm/sections.h    |  4 ++++
 arch/tile/include/asm/sections.h     |  4 ++++
 arch/x86/include/asm/sections.h      | 23 ++++++++++++++++++++++-
 arch/x86/include/asm/uaccess.h       | 18 +-----------------
 include/asm-generic/sections.h       |  4 ++++
 10 files changed, 54 insertions(+), 25 deletions(-)