@@ -2,6 +2,7 @@ config RISCV
def_bool y
select FUNCTION_ALIGNMENT_16B
select GENERIC_BUG_FRAME
+ select GENERIC_PT
select HAS_DEVICE_TREE
select HAS_PMAP
@@ -42,6 +42,8 @@ static inline void *maddr_to_virt(paddr_t ma)
#define virt_to_mfn(va) __virt_to_mfn(va)
#define mfn_to_virt(mfn) __mfn_to_virt(mfn)
+#define pte_get_mfn(pte) maddr_to_mfn(pte_to_paddr(pte))
+
struct page_info
{
/* Each frame can be threaded onto a doubly-linked list. */
@@ -238,6 +240,13 @@ static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
#define PFN_ORDER(pg) ((pg)->v.free.order)
+/*
+ * Generic page table code expects the page table levels to be numbered
+ * in the order L0 -> L1 -> ...;
+ * RISC-V uses the opposite enumeration: Lx -> ... -> L1 -> L0.
+ */
+#define convert_level(level) (HYP_PT_ROOT_LEVEL - (level))
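+/*
+ * E.g. assuming HYP_PT_ROOT_LEVEL is 2 for Sv39, convert_level(0) == 2
+ * (the RISC-V root level) and convert_level(2) == 0 (the leaf level).
+ */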
+
extern unsigned char cpu0_boot_stack[];
void setup_initial_pagetables(void);
@@ -3,6 +3,51 @@
#ifndef __RISCV_PAGE_BITS_H__
#define __RISCV_PAGE_BITS_H__
+/*
+ * PTE format:
+ * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * PFN reserved for SW D A G U X W R V
+ */
+
+#define _PAGE_PRESENT BIT(0, UL)
+#define _PAGE_READ BIT(1, UL) /* Readable */
+#define _PAGE_WRITE BIT(2, UL) /* Writable */
+#define _PAGE_EXEC BIT(3, UL) /* Executable */
+#define _PAGE_USER BIT(4, UL) /* User */
+#define _PAGE_GLOBAL BIT(5, UL) /* Global */
+#define _PAGE_ACCESSED BIT(6, UL) /* Set by hardware on any access */
+#define _PAGE_DIRTY BIT(7, UL) /* Set by hardware on any write */
+#define _PAGE_SOFT BIT(8, UL) /* Reserved for software */
+
+/*
+ * The bits below do not exist in the RISC-V PTE format.
+ * Most of them are defined only to keep the generic PT code happy;
+ * the exception is _PAGE_BLOCK, which is used to request 2 MB (block)
+ * mappings.
+ */
+#define _PAGE_BLOCK BIT(9, UL)
+#define _PAGE_CONTIG BIT(10, UL)
+#define _PAGE_POPULATE BIT(11, UL)
+#define MT_NORMAL BIT(12, UL)
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL))
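+/*
+ * I.e. the PFN, the A/D bits and the software bits are preserved across
+ * a permission update, while V/R/W/X/U/G are replaced.
+ */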
+
+#define PAGE_AI_MASK(x) ((x) & _PAGE_CHG_MASK)
+
+#define _PAGE_W_BIT 2
+#define _PAGE_XN_BIT 3
+#define _PAGE_RO_BIT 1
+
+/* TODO: move these to a generic header? */
+#define _PAGE_XN (1U << _PAGE_XN_BIT)
+#define _PAGE_RO (1U << _PAGE_RO_BIT)
+#define _PAGE_W (1U << _PAGE_W_BIT)
+#define PAGE_XN_MASK(x) (((x) >> _PAGE_XN_BIT) & 0x1U)
+#define PAGE_RO_MASK(x) (((x) >> _PAGE_RO_BIT) & 0x1U)
+#define PAGE_W_MASK(x) (((x) >> _PAGE_W_BIT) & 0x1U)
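+/*
+ * Example: for flags == (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE),
+ * PAGE_RO_MASK(flags) == 1, PAGE_W_MASK(flags) == 1 and
+ * PAGE_XN_MASK(flags) == 0.
+ */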
+
#define PAGE_SHIFT 12 /* 4 KiB Pages */
#define PADDR_BITS 56 /* 44-bit PPN */
@@ -33,6 +33,7 @@
#define PTE_LEAF_DEFAULT (PTE_VALID | PTE_READABLE | PTE_WRITABLE)
#define PTE_TABLE (PTE_VALID)
+#define PAGE_HYPERVISOR_RO (PTE_VALID | PTE_READABLE)
#define PAGE_HYPERVISOR_RW (PTE_VALID | PTE_READABLE | PTE_WRITABLE)
#define PAGE_HYPERVISOR PAGE_HYPERVISOR_RW
@@ -42,13 +43,68 @@
#define pt_index(lvl, va) (pt_linear_offset((lvl), (va)) & VPN_MASK)
-/* Page Table entry */
+#define FIRST_SIZE (XEN_PT_LEVEL_SIZE(2))
+
+#define TABLE_OFFSET(offs) (_AT(unsigned int, offs) & ((_AC(1, U) << PAGETABLE_ORDER) - 1))
+
+#if RV_STAGE1_MODE > SATP_MODE_SV48
+#error "need to to update DECLARE_OFFSETS macros"
+#else
+
+#define l0_table_offset(va) TABLE_OFFSET(pt_linear_offset(0, va))
+#define l1_table_offset(va) TABLE_OFFSET(pt_linear_offset(1, va))
+#define l2_table_offset(va) TABLE_OFFSET(pt_linear_offset(2, va))
+#define l3_table_offset(va) TABLE_OFFSET(pt_linear_offset(3, va))
+
+/* Generate an array @var containing the offset for each level from @addr */
+#define DECLARE_OFFSETS(var, addr) \
+ const unsigned int var[4] = { \
+ l0_table_offset(addr), \
+ l1_table_offset(addr), \
+ l2_table_offset(addr), \
+ l3_table_offset(addr) \
+ }
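+/*
+ * E.g. DECLARE_OFFSETS(offsets, va) yields offsets[0] == the L0 (leaf)
+ * table index of va, ..., offsets[3] == the L3 table index, i.e. the
+ * array is indexed by the RISC-V level number.
+ */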
+
+#endif
+
typedef struct {
+ unsigned long v:1;
+ unsigned long r:1;
+ unsigned long w:1;
+ unsigned long x:1;
+ unsigned long u:1;
+ unsigned long g:1;
+ unsigned long a:1;
+ unsigned long d:1;
+ unsigned long rsw:2;
+#if RV_STAGE1_MODE == SATP_MODE_SV39
+ unsigned long ppn0:9;
+ unsigned long ppn1:9;
+ unsigned long ppn2:26;
+ unsigned long rsw2:7;
+ unsigned long pbmt:2;
+ unsigned long n:1;
+#elif RV_STAGE1_MODE == SATP_MODE_SV48
+ unsigned long ppn0:9;
+ unsigned long ppn1:9;
+ unsigned long ppn2:9;
+ unsigned long ppn3:17;
+ unsigned long rsw2:7;
+ unsigned long pbmt:2;
+ unsigned long n:1;
+#else
+#error "Add proper bits for SATP_MODE"
+#endif
+} pt_t;
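+/*
+ * The physical address targeted by a PTE is the concatenation of the
+ * ppn* fields shifted left by PAGE_SHIFT, e.g. for Sv39:
+ *   paddr = ((ppn2 << 18) | (ppn1 << 9) | ppn0) << PAGE_SHIFT.
+ */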
+
+/* Page Table entry */
+typedef union {
#ifdef CONFIG_RISCV_64
uint64_t pte;
#else
uint32_t pte;
#endif
+pt_t bits;
} pte_t;
pte_t mfn_to_xen_entry(mfn_t mfn, unsigned int attr);
@@ -69,6 +125,21 @@ static inline bool pte_is_valid(pte_t p)
return p.pte & PTE_VALID;
}
+static inline bool pte_is_table(const pte_t p, unsigned int level)
+{
+ (void) level;
+
+ /* A valid PTE with R/W/X all clear points to the next page table level. */
+ return ((p.pte & (PTE_VALID | PTE_READABLE |
+                   PTE_WRITABLE | PTE_EXECUTABLE)) == PTE_VALID);
+}
+
+static inline bool pte_is_mapping(const pte_t pte, unsigned int level)
+{
+ return pte_is_valid(pte) && !pte_is_table(pte, level);
+}
+
static inline void invalidate_icache(void)
{
BUG_ON("unimplemented");
@@ -362,13 +362,64 @@ int destroy_xen_mappings(unsigned long s, unsigned long e)
return -1;
}
-int map_pages_to_xen(unsigned long virt,
- mfn_t mfn,
- unsigned long nr_mfns,
- unsigned int flags)
+mfn_t get_root_page(void)
{
- BUG_ON("unimplemented");
- return -1;
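+ /*
+  * Only the PPN field of satp encodes the root page table's address;
+  * the MODE and ASID fields live in the upper bits. The plain shift
+  * below relies on the ASID being 0 at this point.
+  */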
+ unsigned long root_maddr = csr_read(CSR_SATP) << PAGE_SHIFT;
+
+ return maddr_to_mfn(root_maddr);
+}
+
+/*
+ * Check whether the contiguous bit can be set. Return the number of
+ * contiguous entries allowed. If not allowed, return 1.
+ */
+unsigned int xen_pt_check_contig(unsigned long vfn, mfn_t mfn,
+ unsigned int level, unsigned long left,
+ unsigned int flags)
+{
+ /* There is no contiguous bit in RISC-V. */
+ return 1;
+}
+
+void set_pte_table_bit(pte_t *pte, unsigned int tbl_bit_val)
+{
+ /*
+  * RISC-V has no dedicated table bit; a table entry is simply
+  * PTE_TABLE (i.e. PTE_VALID) with R/W/X clear.
+  */
+ (void) tbl_bit_val;
+
+ pte->pte |= PTE_TABLE;
+}
+
+bool sanity_arch_specific_pte_checks(pte_t entry)
+{
+ /* There are no RISC-V-specific PTE checks. */
+ return true;
+}
+
+unsigned int get_contig_bit(pte_t entry)
+{
+ /* There is no contiguous bit. */
+ (void) entry;
+
+ return 0;
+}
+
+void set_pte_permissions(pte_t *pte, unsigned int flags)
+{
+ pte->bits.r = PAGE_RO_MASK(flags);
+ pte->bits.x = !PAGE_XN_MASK(flags);
+ pte->bits.w = PAGE_W_MASK(flags);
+
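+ /*
+  * Preset the Accessed and Dirty bits so that implementations which do
+  * not manage A/D in hardware never fault on Xen's own mappings.
+  */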
+ pte->pte |= PTE_ACCESSED | PTE_DIRTY;
+}
+
+void flush_xen_tlb_range_va(vaddr_t va,
+                            unsigned long size)
+{
+ /* TODO: implement flushing of a specific VA range. */
+ (void) va;
+ (void) size;
+
+ asm volatile ( "sfence.vma" ::: "memory" );
}
static inline pte_t mfn_to_pte(mfn_t mfn)
Enable GENERIC_PT functionality for RISC-V and introduce some RISC-V
specific functions necessary to make the GENERIC_PT code compile and
work.

Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
Changes in V2:
 - newly introduced patch
---
 xen/arch/riscv/Kconfig                 |  1 +
 xen/arch/riscv/include/asm/mm.h        |  9 ++++
 xen/arch/riscv/include/asm/page-bits.h | 45 ++++++++++++++++
 xen/arch/riscv/include/asm/page.h      | 73 +++++++++++++++++++++++++-
 xen/arch/riscv/mm.c                    | 63 +++++++++++++++++++---
 5 files changed, 184 insertions(+), 7 deletions(-)