@@ -9,6 +9,7 @@
#include <xen/init.h>
#include <xen/types.h>
#include <xen/kernel.h>
+#include <xen/llc-coloring.h>
#include <xen/mm.h>
#include <xen/vmap.h>
#include <xen/smp.h>
@@ -191,6 +192,27 @@ static int __apply_alternatives_multi_stop(void *xenmap)
return 0;
}
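+/*
+ * Build the list of colored MFNs that back the Xen image and map them into a
+ * single contiguous virtual area.
+ */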
+static void __init *xen_remap_colored(mfn_t xen_mfn, paddr_t xen_size)
+{
+ unsigned int i;
+ void *xenmap;
+ mfn_t *xen_colored_mfns, mfn;
+
+ xen_colored_mfns = xmalloc_array(mfn_t, xen_size >> PAGE_SHIFT);
+ if ( !xen_colored_mfns )
+ panic("Can't allocate LLC colored MFNs\n");
+
+ for_each_xen_colored_mfn ( xen_mfn, mfn, i )
+ {
+ xen_colored_mfns[i] = mfn;
+ }
+
+ xenmap = vmap(xen_colored_mfns, xen_size >> PAGE_SHIFT);
+ xfree(xen_colored_mfns);
+
+ return xenmap;
+}
+
/*
* This function should only be called during boot and before CPU0 jump
* into the idle_loop.
@@ -209,8 +231,12 @@ void __init apply_alternatives_all(void)
* The text and inittext section are read-only. So re-map Xen to
* be able to patch the code.
*/
- xenmap = __vmap(&xen_mfn, 1U << xen_order, 1, 1, PAGE_HYPERVISOR,
- VMAP_DEFAULT);
+ if ( llc_coloring_enabled )
+ xenmap = xen_remap_colored(xen_mfn, xen_size);
+ else
+ xenmap = __vmap(&xen_mfn, 1U << xen_order, 1, 1, PAGE_HYPERVISOR,
+ VMAP_DEFAULT);
+
/* Re-mapping Xen is not expected to fail during boot. */
BUG_ON(!xenmap);
@@ -428,6 +428,61 @@ FUNC_LOCAL(fail)
b 1b
END(fail)
+/*
+ * Copy Xen to new location and switch TTBR
+ * x0 ttbr
+ * x1 source address
+ * x2 destination address
+ * x3 length
+ *
+ * Source and destination must be word aligned; the length is rounded up
+ * to a 16-byte boundary.
+ *
+ * MUST BE VERY CAREFUL when saving things to RAM over the copy
+ */
+ENTRY(relocate_xen)
+ /*
+ * Copy 16 bytes at a time using:
+ * x9: counter
+ * x10: data
+ * x11: data
+ * x12: source
+ * x13: destination
+ */
+ mov x9, x3
+ mov x12, x1
+ mov x13, x2
+
+1: ldp x10, x11, [x12], #16
+ stp x10, x11, [x13], #16
+
+ subs x9, x9, #16
+ bgt 1b
+
+ /*
+ * Flush destination from dcache using:
+ * x9: counter
+ * x10: step
+ * x11: vaddr
+ *
+ * This is to ensure data is visible to the instruction cache
+ */
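+        /* Make sure the copy above has completed before the cache maintenance */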
+ dsb sy
+
+ mov x9, x3
+ ldr x10, =dcache_line_bytes /* x10 := step */
+ ldr x10, [x10]
+ mov x11, x2
+
+1: dc cvac, x11
+
+ add x11, x11, x10
+ subs x9, x9, x10
+ bgt 1b
+
+        /* No need for dsb/isb because they are already done in switch_ttbr_id */
+ b switch_ttbr_id
+
/*
* Switch TTBR
*
@@ -453,7 +508,8 @@ FUNC(switch_ttbr_id)
/*
* 5) Flush I-cache
- * This should not be necessary but it is kept for safety.
+ * This should not be necessary in the general case, but it's needed
+ * for cache coloring because code is relocated in that case.
*/
ic iallu
isb
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <xen/init.h>
+#include <xen/llc-coloring.h>
#include <xen/mm.h>
#include <xen/pfn.h>
@@ -138,27 +139,46 @@ void update_boot_mapping(bool enable)
}
extern void switch_ttbr_id(uint64_t ttbr);
+extern void relocate_xen(uint64_t ttbr, void *src, void *dst, size_t len);
typedef void (switch_ttbr_fn)(uint64_t ttbr);
+typedef void (relocate_xen_fn)(uint64_t ttbr, void *src, void *dst, size_t len);
void __init switch_ttbr(uint64_t ttbr)
{
- vaddr_t id_addr = virt_to_maddr(switch_ttbr_id);
- switch_ttbr_fn *fn = (switch_ttbr_fn *)id_addr;
+ vaddr_t vaddr, id_addr;
lpae_t pte;
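+
+    /*
+     * With LLC coloring enabled Xen must also be physically relocated, so go
+     * through relocate_xen(), which copies the image to the colored space
+     * mapped at BOOT_RELOC_VIRT_START and then branches to switch_ttbr_id().
+     */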
+ if ( llc_coloring_enabled )
+ vaddr = (vaddr_t)relocate_xen;
+ else
+ vaddr = (vaddr_t)switch_ttbr_id;
+
+ id_addr = virt_to_maddr(vaddr);
+
/* Enable the identity mapping in the boot page tables */
update_identity_mapping(true);
/* Enable the identity mapping in the runtime page tables */
- pte = pte_of_xenaddr((vaddr_t)switch_ttbr_id);
+ pte = pte_of_xenaddr(vaddr);
pte.pt.table = 1;
pte.pt.xn = 0;
pte.pt.ro = 1;
write_pte(&xen_third_id[third_table_offset(id_addr)], pte);
/* Switch TTBR */
- fn(ttbr);
+ if ( llc_coloring_enabled )
+ {
+ relocate_xen_fn *fn = (relocate_xen_fn *)id_addr;
+
+ fn(ttbr, _start, (void *)BOOT_RELOC_VIRT_START, _end - _start);
+ }
+ else
+ {
+ switch_ttbr_fn *fn = (switch_ttbr_fn *)id_addr;
+
+ fn(ttbr);
+ }
/*
* Disable the identity mapping in the runtime page tables.
@@ -30,6 +30,7 @@
* 10M - 12M Fixmap: special-purpose 4K mapping slots
* 12M - 16M Early boot mapping of FDT
* 16M - 18M Livepatch vmap (if compiled in)
+ * 16M - 24M Cache-colored Xen text, data, bss (temporary, if compiled in)
*
* 1G - 2G VMAP: ioremap and early_ioremap
*
@@ -74,6 +75,8 @@
#define BOOT_FDT_VIRT_START (FIXMAP_VIRT_START + FIXMAP_VIRT_SIZE)
#define BOOT_FDT_VIRT_SIZE _AT(vaddr_t, MB(4))
+#define BOOT_RELOC_VIRT_START (BOOT_FDT_VIRT_START + BOOT_FDT_VIRT_SIZE)
+
#ifdef CONFIG_LIVEPATCH
#define LIVEPATCH_VMAP_START (BOOT_FDT_VIRT_START + BOOT_FDT_VIRT_SIZE)
#define LIVEPATCH_VMAP_SIZE _AT(vaddr_t, MB(2))
@@ -9,6 +9,7 @@
#include <asm/processor.h>
#include <asm/sysregs.h>
+#include <asm/setup.h>
/* Return the LLC way size by probing the hardware */
unsigned int __init get_llc_way_size(void)
@@ -62,8 +63,70 @@ unsigned int __init get_llc_way_size(void)
return line_size * num_sets;
}
+/**
+ * get_xen_paddr - get physical address to relocate Xen to
+ *
+ * Xen is relocated as near to the top of RAM as possible and aligned to a
+ * XEN_PADDR_ALIGN boundary.
+ */
+static paddr_t __init get_xen_paddr(paddr_t xen_size)
+{
+ const struct membanks *mem = bootinfo_get_mem();
+ paddr_t min_size, paddr = 0;
+ unsigned int i;
+
+ min_size = (xen_size + (XEN_PADDR_ALIGN-1)) & ~(XEN_PADDR_ALIGN-1);
+
+ /* Find the highest bank with enough space. */
+ for ( i = 0; i < mem->nr_banks; i++ )
+ {
+ const struct membank *bank = &mem->bank[i];
+ paddr_t s, e;
+
+ if ( bank->size >= min_size )
+ {
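+            /* consider_modules() returns 0 when no suitable gap is found */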
+ e = consider_modules(bank->start, bank->start + bank->size,
+ min_size, XEN_PADDR_ALIGN, 0);
+ if ( !e )
+ continue;
+
+#ifdef CONFIG_ARM_32
+ /* Xen must be under 4GB */
+ if ( e > GB(4) )
+ e = GB(4);
+ if ( e < bank->start )
+ continue;
+#endif
+
+ s = e - min_size;
+
+ if ( s > paddr )
+ paddr = s;
+ }
+ }
+
+ if ( !paddr )
+ panic("Not enough memory to relocate Xen\n");
+
+ printk("Placing Xen at 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
+ paddr, paddr + min_size);
+
+ return paddr;
+}
+
+static paddr_t __init xen_colored_map_size(void)
+{
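+    /*
+     * Worst case: Xen uses a single color, so only one page in every
+     * max_nr_colors pages of the reserved physical window is usable.
+     */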
+ return ROUNDUP((_end - _start) * get_max_nr_llc_colors(), XEN_PADDR_ALIGN);
+}
+
void __init arch_llc_coloring_init(void)
{
+ struct bootmodule *xen_bootmodule = boot_module_find_by_kind(BOOTMOD_XEN);
+
+ BUG_ON(!xen_bootmodule);
+
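+    /*
+     * Repurpose the BOOTMOD_XEN boot module to describe the colored
+     * destination range, so the rest of the boot code treats it as the Xen
+     * image and keeps other allocations away from it.
+     */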
+ xen_bootmodule->size = xen_colored_map_size();
+ xen_bootmodule->start = get_xen_paddr(xen_bootmodule->size);
}
/*
@@ -23,6 +23,9 @@
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
+#define virt_to_reloc_virt(virt) \
+ (((vaddr_t)virt) - XEN_VIRT_START + BOOT_RELOC_VIRT_START)
+
/* Main runtime page tables */
/*
@@ -72,6 +75,7 @@ static void __init __maybe_unused build_assertions(void)
/* 2MB aligned regions */
BUILD_BUG_ON(XEN_VIRT_START & ~SECOND_MASK);
BUILD_BUG_ON(FIXMAP_ADDR(0) & ~SECOND_MASK);
+ BUILD_BUG_ON(BOOT_RELOC_VIRT_START & ~SECOND_MASK);
/* 1GB aligned regions */
#ifdef CONFIG_ARM_32
BUILD_BUG_ON(XENHEAP_VIRT_START & ~FIRST_MASK);
@@ -141,6 +145,9 @@ static void __init __maybe_unused build_assertions(void)
lpae_t __init pte_of_xenaddr(vaddr_t va)
{
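+    /*
+     * With coloring enabled the runtime page tables must reference the
+     * colored physical space that Xen is about to be relocated to, which is
+     * reachable through the temporary BOOT_RELOC_VIRT_START mapping.
+     */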
+ if ( llc_coloring_enabled )
+ va = virt_to_reloc_virt(va);
+
return mfn_to_xen_entry(virt_to_mfn(va), MT_NORMAL);
}
@@ -319,9 +326,44 @@ paddr_t __init consider_modules(paddr_t s, paddr_t e,
return e;
}
+static void __init create_llc_coloring_mappings(void)
+{
+ lpae_t pte;
+ unsigned int i;
+ struct bootmodule *xen_bootmodule = boot_module_find_by_kind(BOOTMOD_XEN);
+ mfn_t start_mfn = maddr_to_mfn(xen_bootmodule->start), mfn;
+
+ for_each_xen_colored_mfn ( start_mfn, mfn, i )
+ {
+ pte = mfn_to_xen_entry(mfn, MT_NORMAL);
+ pte.pt.table = 1; /* level 3 mappings always have this bit set */
+ xen_xenmap[i] = pte;
+ }
+
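+    /* Link xen_xenmap[] into the boot page tables at BOOT_RELOC_VIRT_START */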
+ for ( i = 0; i < XEN_NR_ENTRIES(2); i++ )
+ {
+ vaddr_t va = BOOT_RELOC_VIRT_START + (i << XEN_PT_LEVEL_SHIFT(2));
+
+ pte = mfn_to_xen_entry(virt_to_mfn(xen_xenmap +
+ i * XEN_PT_LPAE_ENTRIES),
+ MT_NORMAL);
+ pte.pt.table = 1;
+ write_pte(&boot_second[second_table_offset(va)], pte);
+ }
+}
+
/*
- * Boot-time pagetable setup.
+ * Boot-time pagetable setup with coloring support
* Changes here may need matching changes in head.S
+ *
+ * The cache coloring support consists of:
+ * - Create colored mapping that conforms to Xen color selection in xen_xenmap[]
+ * - Link the mapping in boot page tables using BOOT_RELOC_VIRT_START as vaddr
+ * - pte_of_xenaddr() takes care of translating addresses to the new space
+ * during runtime page tables creation
+ * - Relocate Xen and update TTBR with the new address in the colored space
+ * (see switch_ttbr())
+ * - Protect the new space
*/
void __init setup_pagetables(void)
{
@@ -329,6 +371,9 @@ void __init setup_pagetables(void)
lpae_t pte, *p;
int i;
+ if ( llc_coloring_enabled )
+ create_llc_coloring_mappings();
+
arch_setup_page_tables();
#ifdef CONFIG_ARM_64
@@ -356,13 +401,7 @@ void __init setup_pagetables(void)
break;
pte = pte_of_xenaddr(va);
pte.pt.table = 1; /* third level mappings always have this bit set */
- if ( is_kernel_text(va) || is_kernel_inittext(va) )
- {
- pte.pt.xn = 0;
- pte.pt.ro = 1;
- }
- if ( is_kernel_rodata(va) )
- pte.pt.ro = 1;
+ pte.pt.xn = 0; /* Permissions will be enforced later. Allow execution */
xen_xenmap[i] = pte;
}
@@ -388,13 +427,48 @@ void __init setup_pagetables(void)
ttbr = virt_to_maddr(cpu0_pgtable);
#endif
- switch_ttbr(ttbr);
-
- xen_pt_enforce_wnx();
-
#ifdef CONFIG_ARM_32
per_cpu(xen_pgtable, 0) = cpu0_pgtable;
#endif
+
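+    /*
+     * The page tables are part of the image that is about to be copied to
+     * the colored space, so TTBR must point at the relocated address of this
+     * CPU's page table.
+     */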
+ if ( llc_coloring_enabled )
+ ttbr = virt_to_maddr(virt_to_reloc_virt(THIS_CPU_PGTABLE));
+
+ switch_ttbr(ttbr);
+
+ /* Protect Xen */
+ for ( i = 0; i < XEN_NR_ENTRIES(3); i++ )
+ {
+ vaddr_t va = XEN_VIRT_START + (i << PAGE_SHIFT);
+ lpae_t *entry = xen_xenmap + i;
+
+ if ( !is_kernel(va) )
+ break;
+
+ pte = read_atomic(entry);
+
+ if ( is_kernel_text(va) || is_kernel_inittext(va) )
+ {
+ pte.pt.xn = 0;
+ pte.pt.ro = 1;
+        }
+        else if ( is_kernel_rodata(va) )
+        {
+            pte.pt.ro = 1;
+            pte.pt.xn = 1;
+        }
+        else
+        {
+            pte.pt.xn = 1;
+            pte.pt.ro = 0;
+        }
+
+ write_pte(entry, pte);
+ }
+
+ /*
+ * We modified live page-tables. Ensure the TLBs are invalidated
+     * before enforcing the WnX permissions.
+ */
+ flush_xen_tlb_local();
+
+ xen_pt_enforce_wnx();
}
void *__init arch_vmap_virt_end(void)
@@ -304,8 +304,6 @@ void asmlinkage __init start_xen(unsigned long fdt_paddr)
/* Initialize traps early allow us to get backtrace when an error occurred */
init_traps();
- setup_pagetables();
-
smp_clear_cpu_maps();
device_tree_flattened = early_fdt_map(fdt_paddr);
@@ -329,6 +327,14 @@ void asmlinkage __init start_xen(unsigned long fdt_paddr)
llc_coloring_init();
+ /*
+     * Page tables must be set up after LLC coloring initialization because
+     * the coloring info is required in order to create colored mappings
+ */
+ setup_pagetables();
+ /* Device-tree was mapped in boot page tables, remap it in the new tables */
+ device_tree_flattened = early_fdt_map(fdt_paddr);
+
setup_mm();
vm_init();
@@ -33,6 +33,8 @@ static unsigned int __ro_after_init xen_num_colors;
#define mfn_color_mask (max_nr_colors - 1)
#define mfn_to_color(mfn) (mfn_x(mfn) & mfn_color_mask)
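+/* Replace the color bits of @mfn with @color */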
+#define get_mfn_with_color(mfn, color) \
+ (_mfn((mfn_x(mfn) & ~mfn_color_mask) | (color)))
/*
* Parse the coloring configuration given in the buf string, following the
@@ -349,6 +351,22 @@ unsigned int get_max_nr_llc_colors(void)
return max_nr_colors;
}
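+/* Return the first MFN at or above @mfn whose color belongs to the Xen selection */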
+mfn_t __init xen_colored_mfn(mfn_t mfn)
+{
+ unsigned int i, color = mfn_to_color(mfn);
+
+ for ( i = 0; i < xen_num_colors; i++ )
+ {
+ if ( color == xen_colors[i] )
+ return mfn;
+ else if ( color < xen_colors[i] )
+ return get_mfn_with_color(mfn, xen_colors[i]);
+ }
+
+ /* Jump to next color space (max_nr_colors mfns) and use the first color */
+ return get_mfn_with_color(mfn_add(mfn, max_nr_colors), xen_colors[0]);
+}
+
/*
* Local variables:
* mode: C
@@ -26,6 +26,17 @@ static inline void domain_dump_llc_colors(const struct domain *d) {}
static inline void domain_llc_coloring_free(struct domain *d) {}
#endif
+/**
+ * Iterate over each Xen mfn in the colored space.
+ * @start_mfn: the first mfn that needs to be colored.
+ * @mfn: the current mfn.
+ * @i: loop index.
+ */
+#define for_each_xen_colored_mfn(start_mfn, mfn, i) \
+ for ( i = 0, mfn = xen_colored_mfn(start_mfn); \
+ i < (_end - _start) >> PAGE_SHIFT; \
+ i++, mfn = xen_colored_mfn(mfn_add(mfn, 1)) )
+
unsigned int get_llc_way_size(void);
void arch_llc_coloring_init(void);
int dom0_set_llc_colors(struct domain *d);
@@ -37,6 +48,8 @@ struct page_info;
unsigned int page_to_llc_color(const struct page_info *pg);
unsigned int get_max_nr_llc_colors(void);
+mfn_t xen_colored_mfn(mfn_t mfn);
+
#endif /* __COLORING_H__ */
/*