@@ -120,6 +120,9 @@ struct asi_taint_policy {
asi_taints_t set;
};
+extern struct asi __asi_global_nonsensitive;
+#define ASI_GLOBAL_NONSENSITIVE (&__asi_global_nonsensitive)
+
/*
* An ASI domain (struct asi) represents a restricted address space. The
* unrestricted address space (and user address space under PTI) are not
@@ -13,6 +13,7 @@
#include <asm/mmu_context.h>
#include <asm/traps.h>
+#include "mm_internal.h"
#include "../../../mm/internal.h"
static struct asi_taint_policy *taint_policies[ASI_MAX_NUM_CLASSES];
@@ -26,6 +27,13 @@ const char *asi_class_names[] = {
DEFINE_PER_CPU_ALIGNED(struct asi *, curr_asi);
EXPORT_SYMBOL(curr_asi);
+static __aligned(PAGE_SIZE) pgd_t asi_global_nonsensitive_pgd[PTRS_PER_PGD];
+
+struct asi __asi_global_nonsensitive = {
+ .pgd = asi_global_nonsensitive_pgd,
+ .mm = &init_mm,
+};
+
static inline bool asi_class_id_valid(enum asi_class_id class_id)
{
return class_id >= 0 && class_id < ASI_MAX_NUM_CLASSES;
@@ -156,6 +164,31 @@ void __init asi_check_boottime_disable(void)
pr_info("ASI enablement ignored due to incomplete implementation.\n");
}
+static int __init asi_global_init(void)
+{
+	if (!boot_cpu_has(X86_FEATURE_ASI))
+		return 0;
+
+	/*
+	 * Lower-level pagetables for global nonsensitive mappings are shared,
+	 * but the PGD has to be copied into each domain during asi_init. To
+	 * avoid needing to synchronize new mappings into pre-existing domains
+	 * we just pre-allocate all of the relevant level N-1 entries so that
+	 * the global nonsensitive PGD already has pointers that can be copied
+	 * when new domains get asi_init()ed.
+	 */
+	preallocate_sub_pgd_pages(asi_global_nonsensitive_pgd,
+				  PAGE_OFFSET,
+				  PAGE_OFFSET + PFN_PHYS(max_pfn) - 1,
+				  "ASI Global Non-sensitive direct map");
+	preallocate_sub_pgd_pages(asi_global_nonsensitive_pgd,
+				  VMALLOC_START, VMALLOC_END,
+				  "ASI Global Non-sensitive vmalloc");
+
+	return 0;
+}
+subsys_initcall(asi_global_init);
+
static void __asi_destroy(struct asi *asi)
{
WARN_ON_ONCE(asi->ref_count <= 0);
@@ -170,6 +203,7 @@ int asi_init(struct mm_struct *mm, enum asi_class_id class_id, struct asi **out_
{
struct asi *asi;
int err = 0;
+ uint i;
*out_asi = NULL;
@@ -203,6 +237,9 @@ int asi_init(struct mm_struct *mm, enum asi_class_id class_id, struct asi **out_
asi->mm = mm;
asi->class_id = class_id;
+ for (i = KERNEL_PGD_BOUNDARY; i < PTRS_PER_PGD; i++)
+ set_pgd(asi->pgd + i, asi_global_nonsensitive_pgd[i]);
+
exit_unlock:
if (err)
__asi_destroy(asi);
@@ -1288,18 +1288,15 @@ static void __init register_page_bootmem_info(void)
#endif
}
-/*
- * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
- * Only the level which needs to be synchronized between all page-tables is
- * allocated because the synchronization can be expensive.
- */
-static void __init preallocate_vmalloc_pages(void)
+/* Initialize empty pagetables at the level below PGD. */
+void __init preallocate_sub_pgd_pages(pgd_t *pgd_table, ulong start,
+ ulong end, const char *name)
{
unsigned long addr;
const char *lvl;
- for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
- pgd_t *pgd = pgd_offset_k(addr);
+ for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+ pgd_t *pgd = pgd_offset_pgd(pgd_table, addr);
p4d_t *p4d;
pud_t *pud;
@@ -1335,7 +1332,17 @@ static void __init preallocate_vmalloc_pages(void)
* The pages have to be there now or they will be missing in
* process page-tables later.
*/
- panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
+ panic("Failed to pre-allocate %s pages for %s area\n", lvl, name);
+}
+
+/*
+ * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
+ * Only the level which needs to be synchronized between all page-tables is
+ * allocated because the synchronization can be expensive.
+ */
+static void __init preallocate_vmalloc_pages(void)
+{
+ preallocate_sub_pgd_pages(init_mm.pgd, VMALLOC_START, VMEMORY_END, "vmalloc");
}
void __init mem_init(void)
@@ -25,4 +25,7 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
extern unsigned long tlb_single_page_flush_ceiling;
+extern void preallocate_sub_pgd_pages(pgd_t *pgd_table, ulong start,
+ ulong end, const char *name);
+
#endif /* __X86_MM_INTERNAL_H */
@@ -23,6 +23,8 @@ typedef u8 asi_taints_t;
#ifndef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+#define ASI_GLOBAL_NONSENSITIVE NULL
+
struct asi_hooks {};
struct asi {};