@@ -280,6 +280,19 @@ static inline paddr_t __virt_to_maddr(vaddr_t va)
#define virt_to_maddr(va) __virt_to_maddr((vaddr_t)(va))
#ifdef CONFIG_ARM_32
+/**
+ * Find the virtual address corresponding to a machine address
+ *
+ * Only memory backing the XENHEAP has a corresponding virtual address to
+ * be found. This is so we can save precious virtual space, as it's in
+ * short supply on arm32. This mapping is not subject to PDX compression
+ * because XENHEAP is known to be physically contiguous and hence can't
+ * jump over the PDX hole. This means we can avoid the roundtrips
+ * converting to/from pdx.
+ *
+ * @param ma Machine address
+ * @return Virtual address mapped to `ma`
+ */
static inline void *maddr_to_virt(paddr_t ma)
{
ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
@@ -287,6 +300,19 @@ static inline void *maddr_to_virt(paddr_t ma)
return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
}
#else
+/**
+ * Find the virtual address corresponding to a machine address
+ *
+ * The directmap covers all conventional memory accessible by the
+ * hypervisor. This means it's subject to PDX compression.
+ *
+ * Note there's an extra offset applied (directmap_base_pdx) on top of the
+ * regular PDX compression logic. Its purpose is to skip over the initial
+ * range of non-existing memory, should there be one.
+ *
+ * @param ma Machine address
+ * @return Virtual address mapped to `ma`
+ */
static inline void *maddr_to_virt(paddr_t ma)
{
ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) <
           (DIRECTMAP_SIZE >> PAGE_SHIFT));

arm32 merely covers the XENHEAP, whereas arm64 currently covers anything
in the frame table. These comments highlight why arm32 doesn't need to
account for PDX compression in its __va() implementation while arm64 does.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
---
v2:
  * Removed statement about "containing GiB" (Julien)
---
 xen/arch/arm/include/asm/mm.h | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)