@@ -2535,6 +2535,14 @@ config MITIGATION_ADDRESS_SPACE_ISOLATION
This dependencies will later be removed with extensions to the KASAN
implementation.
+config MITIGATION_ADDRESS_SPACE_ISOLATION_DEFAULT_ON
+	bool "Enable address space isolation by default"
+	default n
+	depends on MITIGATION_ADDRESS_SPACE_ISOLATION
+ help
+	  If selected, ASI is enabled by default at boot unless asi=on or
+	  asi=off is specified on the kernel command line.
+
config MITIGATION_RETPOLINE
bool "Avoid speculative indirect branches in kernel"
select OBJTOOL if HAVE_OBJTOOL
@@ -6,6 +6,7 @@
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
+#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <linux/sched.h>
@@ -64,6 +65,9 @@
* the N ASI classes.
*/
+/* Try to avoid this outside of hot code (see comment on _static_cpu_has). */
+#define static_asi_enabled() cpu_feature_enabled(X86_FEATURE_ASI)
+
#define ASI_MAX_NUM_ORDER 2
#define ASI_MAX_NUM (1 << ASI_MAX_NUM_ORDER)
@@ -101,6 +105,8 @@ struct asi {
DECLARE_PER_CPU_ALIGNED(struct asi *, curr_asi);
+void asi_check_boottime_disable(void);
+
void asi_init_mm_state(struct mm_struct *mm);
int asi_register_class(const char *name, const struct asi_hooks *ops);
@@ -124,7 +130,9 @@ void asi_exit(void);
/* The target is the domain we'll enter when returning to process context. */
static __always_inline struct asi *asi_get_target(struct task_struct *p)
{
- return p->thread.asi_state.target;
+ return static_asi_enabled()
+ ? p->thread.asi_state.target
+ : NULL;
}
static __always_inline void asi_set_target(struct task_struct *p,
@@ -135,7 +143,9 @@ static __always_inline void asi_set_target(struct task_struct *p,
static __always_inline struct asi *asi_get_current(void)
{
- return this_cpu_read(curr_asi);
+ return static_asi_enabled()
+ ? this_cpu_read(curr_asi)
+ : NULL;
}
/* Are we currently in a restricted address space? */
@@ -144,7 +154,11 @@ static __always_inline bool asi_is_restricted(void)
return (bool)asi_get_current();
}
-/* If we exit/have exited, can we stay that way until the next asi_enter? */
+/*
+ * If we exit/have exited, can we stay that way until the next asi_enter?
+ *
+ * When ASI is disabled, this returns true.
+ */
static __always_inline bool asi_is_relaxed(void)
{
return !asi_get_target(current);
@@ -470,6 +470,7 @@
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
+#define X86_FEATURE_ASI			(21*32+ 5) /* Kernel Address Space Isolation */
/*
* BUG word(s)
@@ -50,6 +50,12 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
+#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+# define DISABLE_ASI 0
+#else
+# define DISABLE_ASI (1 << (X86_FEATURE_ASI & 31))
+#endif
+
#ifdef CONFIG_MITIGATION_RETPOLINE
# define DISABLE_RETPOLINE 0
#else
@@ -154,7 +160,7 @@
#define DISABLED_MASK17 0
#define DISABLED_MASK18 (DISABLE_IBT)
#define DISABLED_MASK19 (DISABLE_SEV_SNP)
-#define DISABLED_MASK20 0
+#define DISABLED_MASK20 (DISABLE_ASI)
#define DISABLED_MASK21 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
@@ -4,7 +4,9 @@
#include <linux/percpu.h>
#include <linux/spinlock.h>
+#include <linux/init.h>
#include <asm/asi.h>
+#include <asm/cmdline.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
@@ -28,6 +30,9 @@ int asi_register_class(const char *name, const struct asi_hooks *ops)
{
int i;
+ if (!boot_cpu_has(X86_FEATURE_ASI))
+ return 0;
+
VM_BUG_ON(name == NULL);
spin_lock(&asi_class_lock);
@@ -52,6 +57,9 @@ EXPORT_SYMBOL_GPL(asi_register_class);
void asi_unregister_class(int index)
{
+ if (!boot_cpu_has(X86_FEATURE_ASI))
+ return;
+
BUG_ON(!asi_index_valid(index));
spin_lock(&asi_class_lock);
@@ -63,11 +71,36 @@ void asi_unregister_class(int index)
}
EXPORT_SYMBOL_GPL(asi_unregister_class);
+void __init asi_check_boottime_disable(void)
+{
+ bool enabled = IS_ENABLED(CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION_DEFAULT_ON);
+ char arg[4];
+ int ret;
+
+ ret = cmdline_find_option(boot_command_line, "asi", arg, sizeof(arg));
+ if (ret == 3 && !strncmp(arg, "off", 3)) {
+ enabled = false;
+ pr_info("ASI disabled through kernel command line.\n");
+ } else if (ret == 2 && !strncmp(arg, "on", 2)) {
+ enabled = true;
+ pr_info("Ignoring asi=on param while ASI implementation is incomplete.\n");
+ } else {
+ pr_info("ASI %s by default.\n",
+ enabled ? "enabled" : "disabled");
+ }
+
+ if (enabled)
+ pr_info("ASI enablement ignored due to incomplete implementation.\n");
+}
static void __asi_destroy(struct asi *asi)
{
- lockdep_assert_held(&asi->mm->asi_init_lock);
+ WARN_ON_ONCE(asi->ref_count <= 0);
+ if (--(asi->ref_count) > 0)
+ return;
+ free_pages((ulong)asi->pgd, PGD_ALLOCATION_ORDER);
+ memset(asi, 0, sizeof(struct asi));
}
int asi_init(struct mm_struct *mm, int asi_index, struct asi **out_asi)
@@ -77,6 +110,9 @@ int asi_init(struct mm_struct *mm, int asi_index, struct asi **out_asi)
*out_asi = NULL;
+ if (!boot_cpu_has(X86_FEATURE_ASI))
+ return 0;
+
BUG_ON(!asi_index_valid(asi_index));
asi = &mm->asi[asi_index];
@@ -121,7 +157,7 @@ void asi_destroy(struct asi *asi)
{
struct mm_struct *mm;
- if (!asi)
+ if (!boot_cpu_has(X86_FEATURE_ASI) || !asi)
return;
mm = asi->mm;
@@ -130,11 +166,7 @@ void asi_destroy(struct asi *asi)
* to block concurrent asi_init calls.
*/
mutex_lock(&mm->asi_init_lock);
- WARN_ON_ONCE(asi->ref_count <= 0);
- if (--(asi->ref_count) == 0) {
- free_pages((ulong)asi->pgd, PGD_ALLOCATION_ORDER);
- memset(asi, 0, sizeof(struct asi));
- }
+ __asi_destroy(asi);
mutex_unlock(&mm->asi_init_lock);
}
EXPORT_SYMBOL_GPL(asi_destroy);
@@ -178,6 +210,9 @@ static noinstr void __asi_enter(void)
noinstr void asi_enter(struct asi *asi)
{
+ if (!static_asi_enabled())
+ return;
+
VM_WARN_ON_ONCE(!asi);
asi_set_target(current, asi);
@@ -189,8 +224,10 @@ EXPORT_SYMBOL_GPL(asi_enter);
inline_or_noinstr void asi_relax(void)
{
- barrier();
- asi_set_target(current, NULL);
+ if (static_asi_enabled()) {
+ barrier();
+ asi_set_target(current, NULL);
+ }
}
EXPORT_SYMBOL_GPL(asi_relax);
@@ -199,6 +236,9 @@ noinstr void asi_exit(void)
u64 unrestricted_cr3;
struct asi *asi;
+ if (!static_asi_enabled())
+ return;
+
preempt_disable_notrace();
VM_BUG_ON(this_cpu_read(cpu_tlbstate.loaded_mm) ==
@@ -229,6 +269,9 @@ EXPORT_SYMBOL_GPL(asi_exit);
void asi_init_mm_state(struct mm_struct *mm)
{
+ if (!boot_cpu_has(X86_FEATURE_ASI))
+ return;
+
memset(mm->asi, 0, sizeof(mm->asi));
mutex_init(&mm->asi_init_lock);
}
@@ -27,6 +27,7 @@
#include <asm/text-patching.h>
#include <asm/memtype.h>
#include <asm/paravirt.h>
+#include <asm/asi.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -250,7 +251,7 @@ static void __init probe_page_size_mask(void)
__default_kernel_pte_mask = __supported_pte_mask;
/* Except when with PTI where the kernel is mostly non-Global: */
if (cpu_feature_enabled(X86_FEATURE_PTI) ||
- IS_ENABLED(CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION))
+ cpu_feature_enabled(X86_FEATURE_ASI))
__default_kernel_pte_mask &= ~_PAGE_GLOBAL;
/* Enable 1 GB linear kernel mappings if available: */
@@ -757,6 +758,7 @@ void __init init_mem_mapping(void)
unsigned long end;
pti_check_boottime_disable();
+ asi_check_boottime_disable();
probe_page_size_mask();
setup_pcid();
@@ -48,6 +48,10 @@ static inline struct asi *asi_get_target(struct task_struct *p) { return NULL; }
static inline pgd_t *asi_pgd(struct asi *asi) { return NULL; }
+#define static_asi_enabled() false
+
+static inline void asi_check_boottime_disable(void) { }
+
#endif /* !_ASSEMBLY_ */
#endif /* !CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION */
Add a boot time parameter to control the newly added X86_FEATURE_ASI. "asi=on" or "asi=off" can be used in the kernel command line to enable or disable ASI at boot time. If not specified, ASI enablement depends on CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION_DEFAULT_ON, which is off by default. asi_check_boottime_disable() is modeled after pti_check_boottime_disable(). The boot parameter is currently ignored until ASI is fully functional. Once we have a set of ASI features checked in that we have actually tested, we will stop ignoring the flag. But for now let's just add the infrastructure so we can implement the usage code. Co-developed-by: Junaid Shahid <junaids@google.com> Co-developed-by: Yosry Ahmed <yosryahmed@google.com> Signed-off-by: Brendan Jackman <jackmanb@google.com> --- arch/x86/Kconfig | 8 +++++ arch/x86/include/asm/asi.h | 20 +++++++++-- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/disabled-features.h | 8 ++++- arch/x86/mm/asi.c | 61 +++++++++++++++++++++++++++----- arch/x86/mm/init.c | 4 ++- include/asm-generic/asi.h | 4 +++ 7 files changed, 92 insertions(+), 14 deletions(-)