@@ -3,6 +3,7 @@ config RISCV
select FUNCTION_ALIGNMENT_16B
select GENERIC_BUG_FRAME
select HAS_DEVICE_TREE
+ select HAS_PMAP
select HAS_VMAP
config RISCV_64
@@ -5,6 +5,12 @@
#include <xen/bug.h>
#include <xen/cpumask.h>
+/* Flush the local hart's TLB entry for virtual address va (other harts unaffected). */
+static inline void flush_tlb_one_local(vaddr_t va)
+{
+    asm volatile ( "sfence.vma %0" :: "r" (va) : "memory" ); /* rs2 omitted (x0): covers all ASIDs for this va */
+}
+
/*
* Filter the given set of CPUs, removing those that definitely flushed their
* TLB since @page_timestamp.
@@ -94,6 +94,12 @@ static inline pte_t read_pte(const pte_t *p)
return read_atomic(p);
}
+static inline pte_t pte_from_mfn(mfn_t mfn, unsigned int flags) /* build a PTE mapping mfn with the given attribute flags */
+{
+    unsigned long pte = (mfn_x(mfn) << PTE_PPN_SHIFT) | flags; /* PPN goes in bits [PTE_PPN_SHIFT..]; low bits hold attributes */
+    return (pte_t){ .pte = pte };
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PAGE_H */
new file mode 100644
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_PMAP_H
+#define ASM_PMAP_H
+
+#include <xen/bug.h>
+#include <xen/init.h>
+#include <xen/mm.h>
+#include <xen/page-size.h>
+
+#include <asm/fixmap.h>
+#include <asm/flushtlb.h>
+#include <asm/system.h>
+
+static inline void __init arch_pmap_map(unsigned int slot, mfn_t mfn) /* map mfn read/write at fixmap slot; boot-time (__init) pmap hook */
+{
+    pte_t *entry = &xen_fixmap[slot];
+    pte_t pte;
+
+    ASSERT(!pte_is_valid(*entry)); /* pmap mappings are transient: the slot must currently be free */
+
+    pte = pte_from_mfn(mfn, PAGE_HYPERVISOR_RW);
+    write_pte(entry, pte);
+
+    flush_tlb_one_local(FIXMAP_ADDR(slot)); /* RISC-V permits caching invalid PTEs, so flush even on invalid->valid */
+}
+
+static inline void __init arch_pmap_unmap(unsigned int slot) /* tear down the pmap mapping at fixmap slot */
+{
+    pte_t pte = {}; /* all-zero PTE: valid (V) bit clear */
+
+    write_pte(&xen_fixmap[slot], pte);
+
+    flush_tlb_one_local(FIXMAP_ADDR(slot)); /* drop any cached translation for the slot's address */
+}
+
+#endif /* ASM_PMAP_H */