@@ -4,13 +4,13 @@
#include <linux/sched.h>
-#include <asm-generic/asi.h>
-
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
+#include <asm-generic/asi.h>
+
#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
/*
@@ -55,4 +55,28 @@
# define X86_CR3_PTI_PCID_USER_BIT 11
#endif
+/*
+ * An ASI identifier is encoded in the upper bits of the PCID so that each
+ * restricted address space uses a PCID distinct from that of the unrestricted
+ * address space (see asi_pcid()). The bits used are those directly below the
+ * bit consumed by PTI (if any).
+ */
+#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+
+#define X86_CR3_ASI_PCID_BITS 2
+
+/* Use the highest PCID bits available, below the PTI bit (if any) */
+#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
+#define X86_CR3_ASI_PCID_END_BIT (X86_CR3_PTI_PCID_USER_BIT - 1)
+#else
+#define X86_CR3_ASI_PCID_END_BIT (X86_CR3_PCID_BITS - 1)
+#endif
+
+#define X86_CR3_ASI_PCID_BITS_SHIFT (X86_CR3_ASI_PCID_END_BIT - X86_CR3_ASI_PCID_BITS + 1)
+#define X86_CR3_ASI_PCID_MASK (((1UL << X86_CR3_ASI_PCID_BITS) - 1) << X86_CR3_ASI_PCID_BITS_SHIFT)
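+
+/*
+ * For example, with X86_CR3_PCID_BITS == 12 the ASI identifier lands in PCID
+ * bits 9-10 when PTI is enabled (bit 11 belongs to PTI) and in bits 10-11
+ * otherwise.
+ */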
+
+#else
+#define X86_CR3_ASI_PCID_BITS 0
+#endif
+
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
@@ -425,5 +425,8 @@ static inline void __native_tlb_flush_global(unsigned long cr4)
}
unsigned long build_cr3_noinstr(pgd_t *pgd, u16 asid, unsigned long lam);
+unsigned long build_cr3_pcid_noinstr(pgd_t *pgd, u16 pcid, unsigned long lam, bool noflush);
+
+u16 asi_pcid(struct asi *asi, u16 asid);
#endif /* _ASM_X86_TLBFLUSH_H */
@@ -238,6 +238,7 @@ static __always_inline void maybe_flush_data(struct asi *next_asi)
noinstr void __asi_enter(void)
{
u64 asi_cr3;
+ u16 pcid;
struct asi *target = asi_get_target(current);
/*
@@ -266,9 +267,8 @@ noinstr void __asi_enter(void)
this_cpu_write(curr_asi, target);
maybe_flush_control(target);
- asi_cr3 = build_cr3_noinstr(target->pgd,
- this_cpu_read(cpu_tlbstate.loaded_mm_asid),
- tlbstate_lam_cr3_mask());
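+	/* Pick the PCID that encodes this ASI class on top of the current ASID. */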
+ pcid = asi_pcid(target, this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+ asi_cr3 = build_cr3_pcid_noinstr(target->pgd, pcid, tlbstate_lam_cr3_mask(), false);
write_cr3(asi_cr3);
maybe_flush_data(target);
@@ -335,8 +335,8 @@ noinstr void asi_exit(void)
unrestricted_cr3 =
build_cr3_noinstr(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
- this_cpu_read(cpu_tlbstate.loaded_mm_asid),
- tlbstate_lam_cr3_mask());
+ this_cpu_read(cpu_tlbstate.loaded_mm_asid),
+ tlbstate_lam_cr3_mask());
/* Tainting first makes reentrancy easier to reason about. */
this_cpu_or(asi_taints, ASI_TAINT_KERNEL_DATA);
@@ -13,6 +13,7 @@
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
+#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/nospec-branch.h>
@@ -96,7 +97,10 @@
# define PTI_CONSUMED_PCID_BITS 0
#endif
-#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
+#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS - \
+ X86_CR3_ASI_PCID_BITS)
+
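+/* kern_pcid() values (asid + 1) must fit in the PCID bits left after PTI and ASI. */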
+static_assert(BIT(CR3_AVAIL_PCID_BITS) > TLB_NR_DYN_ASIDS);
/*
* ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account
@@ -125,6 +129,11 @@ static __always_inline u16 kern_pcid(u16 asid)
*/
VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
#endif
+
+#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+ BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_ASI_PCID_BITS_SHIFT));
+ VM_WARN_ON_ONCE(asid & X86_CR3_ASI_PCID_MASK);
+#endif
/*
* The dynamically-assigned ASIDs that get passed in are small
* (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
@@ -153,17 +162,22 @@ static inline u16 user_pcid(u16 asid)
return ret;
}
+static __always_inline unsigned long __build_cr3(pgd_t *pgd, u16 pcid, unsigned long lam)
+{
+ return __sme_pa_nodebug(pgd) | pcid | lam;
+}
+
static __always_inline unsigned long build_cr3(pgd_t *pgd, u16 asid, unsigned long lam)
{
- unsigned long cr3 = __sme_pa_nodebug(pgd) | lam;
+ u16 pcid = 0;
if (static_cpu_has(X86_FEATURE_PCID)) {
- cr3 |= kern_pcid(asid);
+ pcid = kern_pcid(asid);
} else {
VM_WARN_ON_ONCE(asid != 0);
}
- return cr3;
+ return __build_cr3(pgd, pcid, lam);
}
noinstr unsigned long build_cr3_noinstr(pgd_t *pgd, u16 asid, unsigned long lam)
@@ -183,6 +197,19 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid,
return build_cr3(pgd, asid, lam) | CR3_NOFLUSH;
}
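+/*
+ * Like build_cr3_noinstr(), but the caller supplies a pre-computed PCID (e.g.
+ * from asi_pcid()) instead of a dynamic ASID, and can ask for a non-flushing
+ * CR3 value.
+ */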
+noinstr unsigned long build_cr3_pcid_noinstr(pgd_t *pgd, u16 pcid,
+ unsigned long lam, bool noflush)
+{
+ u64 noflush_bit = 0;
+
+ if (!static_cpu_has(X86_FEATURE_PCID))
+ pcid = 0;
+ else if (noflush)
+ noflush_bit = CR3_NOFLUSH;
+
+ return __build_cr3(pgd, pcid, lam) | noflush_bit;
+}
+
/*
* We get here when we do something requiring a TLB invalidation
* but could not go invalidate all of the contexts. We do the
@@ -998,6 +1025,20 @@ static void put_flush_tlb_info(void)
#endif
}
+#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+
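+/*
+ * Encode asi->class_id + 1 above the regular kernel PCID: kern_pcid() values
+ * never set the ASI bits, so a restricted address space can never share a
+ * PCID with the unrestricted address space.
+ */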
+noinstr u16 asi_pcid(struct asi *asi, u16 asid)
+{
+ return kern_pcid(asid) | ((asi->class_id + 1) << X86_CR3_ASI_PCID_BITS_SHIFT);
+}
+
+#else /* CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION */
+
+u16 asi_pcid(struct asi *asi, u16 asid) { return kern_pcid(asid); }
+
+#endif /* CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION */
+
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned int stride_shift,
bool freed_tables)
@@ -2,6 +2,7 @@
#ifndef __ASM_GENERIC_ASI_H
#define __ASM_GENERIC_ASI_H
+#include <linux/log2.h>
#include <linux/types.h>
#ifndef __ASSEMBLY__
@@ -16,6 +17,7 @@ enum asi_class_id {
#endif
ASI_MAX_NUM_CLASSES,
};
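+/*
+ * asi_pcid() encodes class_id + 1 in the X86_CR3_ASI_PCID_BITS bits of the
+ * PCID, so every class ID must be representable there when ASI is enabled.
+ */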
+static_assert(!IS_ENABLED(CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION) ||
+	      order_base_2(ASI_MAX_NUM_CLASSES + 1) <= X86_CR3_ASI_PCID_BITS);
typedef u8 asi_taints_t;