diff mbox series

[v2,2/7] xen/arm: Implement virt/maddr conversion in MPU system

Message ID 20250316192445.2376484-3-luca.fancellu@arm.com (mailing list archive)
State New
Headers show
Series MPU mm subsystem skeleton | expand

Commit Message

Luca Fancellu March 16, 2025, 7:24 p.m. UTC
From: Penny Zheng <Penny.Zheng@arm.com>

virt_to_maddr and maddr_to_virt are used widely in Xen code. So
even though there is no VMSA in an MPU system, we keep the interface
in MPU to avoid changing the existing common code.

In order to do that, move the virt_to_maddr() and maddr_to_virt()
definitions to mmu/mm.h, and move the include of the memory management
subsystem headers (MMU/MPU) to a different place because the mentioned
helpers need visibility of some macros in asm/mm.h.

Finally implement virt_to_maddr() and maddr_to_virt() for MPU systems
under mpu/mm.h, the MPU version of virt/maddr conversion is simple since
VA==PA.

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Signed-off-by: Wei Chen <wei.chen@arm.com>
Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
---
v2 changes:
 - add missing headers, fix header protection macro name convention
 - take into account PADDR_MASK in virt_to_maddr
 - fixed typo in commit message
 - moved also maddr_to_virt to mmu/mm.h and moved include of mmu/mpu.
 - moved introduction of mm.c footer on commit that touches that file.
---
 xen/arch/arm/include/asm/mm.h     | 64 +++++--------------------------
 xen/arch/arm/include/asm/mmu/mm.h | 56 +++++++++++++++++++++++++++
 xen/arch/arm/include/asm/mpu/mm.h | 27 +++++++++++++
 3 files changed, 92 insertions(+), 55 deletions(-)
 create mode 100644 xen/arch/arm/include/asm/mpu/mm.h

Comments

Orzel, Michal March 17, 2025, 9 a.m. UTC | #1
On 16/03/2025 20:24, Luca Fancellu wrote:
> 
> 
> From: Penny Zheng <Penny.Zheng@arm.com>
> 
> virt_to_maddr and maddr_to_virt are used widely in Xen code. So
> even there is no VMSA in MPU system, we keep the interface in MPU to
> to avoid changing the existing common code.
> 
> In order to do that, move the virt_to_maddr() and maddr_to_virt()
> definitions to mmu/mm.h, move the include of memory management
> subsystems (MMU/MPU) on a different place because the mentioned
> helpers needs visibility of some macro in asm/mm.h.
> 
> Finally implement virt_to_maddr() and maddr_to_virt() for MPU systems
> under mpu/mm.h, the MPU version of virt/maddr conversion is simple since
> VA==PA.
> 
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> Signed-off-by: Wei Chen <wei.chen@arm.com>
> Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>

Reviewed-by: Michal Orzel <michal.orzel@amd.com>

~Michal
diff mbox series

Patch

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index a0d8e5afe977..444fd03823ec 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -14,12 +14,6 @@ 
 # error "unknown ARM variant"
 #endif
 
-#if defined(CONFIG_MMU)
-# include <asm/mmu/mm.h>
-#elif !defined(CONFIG_MPU)
-# error "Unknown memory management layout"
-#endif
-
 /* Align Xen to a 2 MiB boundary. */
 #define XEN_PADDR_ALIGN (1 << 21)
 
@@ -261,55 +255,6 @@  static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
 /* Page-align address and convert to frame number format */
 #define paddr_to_pfn_aligned(paddr)    paddr_to_pfn(PAGE_ALIGN(paddr))
 
-#define virt_to_maddr(va) ({                                        \
-    vaddr_t va_ = (vaddr_t)(va);                                    \
-    (paddr_t)((va_to_par(va_) & PADDR_MASK & PAGE_MASK) | (va_ & ~PAGE_MASK)); \
-})
-
-#ifdef CONFIG_ARM_32
-/**
- * Find the virtual address corresponding to a machine address
- *
- * Only memory backing the XENHEAP has a corresponding virtual address to
- * be found. This is so we can save precious virtual space, as it's in
- * short supply on arm32. This mapping is not subject to PDX compression
- * because XENHEAP is known to be physically contiguous and can't hence
- * jump over the PDX hole. This means we can avoid the roundtrips
- * converting to/from pdx.
- *
- * @param ma Machine address
- * @return Virtual address mapped to `ma`
- */
-static inline void *maddr_to_virt(paddr_t ma)
-{
-    ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
-    ma -= mfn_to_maddr(directmap_mfn_start);
-    return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
-}
-#else
-/**
- * Find the virtual address corresponding to a machine address
- *
- * The directmap covers all conventional memory accesible by the
- * hypervisor. This means it's subject to PDX compression.
- *
- * Note there's an extra offset applied (directmap_base_pdx) on top of the
- * regular PDX compression logic. Its purpose is to skip over the initial
- * range of non-existing memory, should there be one.
- *
- * @param ma Machine address
- * @return Virtual address mapped to `ma`
- */
-static inline void *maddr_to_virt(paddr_t ma)
-{
-    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) <
-           (DIRECTMAP_SIZE >> PAGE_SHIFT));
-    return (void *)(XENHEAP_VIRT_START -
-                    (directmap_base_pdx << PAGE_SHIFT) +
-                    maddr_to_directmapoff(ma));
-}
-#endif
-
 /*
  * Translate a guest virtual address to a machine address.
  * Return the fault information if the translation has failed else 0.
@@ -340,6 +285,15 @@  static inline uint64_t gvirt_to_maddr(vaddr_t va, paddr_t *pa,
 #define virt_to_mfn(va)     __virt_to_mfn(va)
 #define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
 
+/* Memory management subsystem header placed here to see the above macros */
+#if defined(CONFIG_MMU)
+# include <asm/mmu/mm.h>
+#elif defined(CONFIG_MPU)
+# include <asm/mpu/mm.h>
+#else
+#error "Unknown memory management layout"
+#endif
+
 /* Convert between Xen-heap virtual addresses and page-info structures. */
 static inline struct page_info *virt_to_page(const void *v)
 {
diff --git a/xen/arch/arm/include/asm/mmu/mm.h b/xen/arch/arm/include/asm/mmu/mm.h
index f5a00558c47b..6737c3ede783 100644
--- a/xen/arch/arm/include/asm/mmu/mm.h
+++ b/xen/arch/arm/include/asm/mmu/mm.h
@@ -2,6 +2,13 @@ 
 #ifndef __ARM_MMU_MM_H__
 #define __ARM_MMU_MM_H__
 
+#include <xen/bug.h>
+#include <xen/pdx.h>
+#include <xen/types.h>
+#include <asm/mm.h>
+#include <asm/mmu/layout.h>
+#include <asm/page.h>
+
 /* Non-boot CPUs use this to find the correct pagetables. */
 extern uint64_t init_ttbr;
 
@@ -14,6 +21,55 @@  extern unsigned long directmap_base_pdx;
 
 #define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
 
+#define virt_to_maddr(va) ({                                                   \
+    vaddr_t va_ = (vaddr_t)(va);                                               \
+    (paddr_t)((va_to_par(va_) & PADDR_MASK & PAGE_MASK) | (va_ & ~PAGE_MASK)); \
+})
+
+#ifdef CONFIG_ARM_32
+/**
+ * Find the virtual address corresponding to a machine address
+ *
+ * Only memory backing the XENHEAP has a corresponding virtual address to
+ * be found. This is so we can save precious virtual space, as it's in
+ * short supply on arm32. This mapping is not subject to PDX compression
+ * because XENHEAP is known to be physically contiguous and can't hence
+ * jump over the PDX hole. This means we can avoid the roundtrips
+ * converting to/from pdx.
+ *
+ * @param ma Machine address
+ * @return Virtual address mapped to `ma`
+ */
+static inline void *maddr_to_virt(paddr_t ma)
+{
+    ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
+    ma -= mfn_to_maddr(directmap_mfn_start);
+    return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
+}
+#else
+/**
+ * Find the virtual address corresponding to a machine address
+ *
+ * The directmap covers all conventional memory accesible by the
+ * hypervisor. This means it's subject to PDX compression.
+ *
+ * Note there's an extra offset applied (directmap_base_pdx) on top of the
+ * regular PDX compression logic. Its purpose is to skip over the initial
+ * range of non-existing memory, should there be one.
+ *
+ * @param ma Machine address
+ * @return Virtual address mapped to `ma`
+ */
+static inline void *maddr_to_virt(paddr_t ma)
+{
+    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) <
+           (DIRECTMAP_SIZE >> PAGE_SHIFT));
+    return (void *)(XENHEAP_VIRT_START -
+                    (directmap_base_pdx << PAGE_SHIFT) +
+                    maddr_to_directmapoff(ma));
+}
+#endif
+
 /*
  * Print a walk of a page table or p2m
  *
diff --git a/xen/arch/arm/include/asm/mpu/mm.h b/xen/arch/arm/include/asm/mpu/mm.h
new file mode 100644
index 000000000000..6cfd0f5cd2c2
--- /dev/null
+++ b/xen/arch/arm/include/asm/mpu/mm.h
@@ -0,0 +1,27 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ARM_MPU_MM_H__
+#define __ARM_MPU_MM_H__
+
+#include <xen/macros.h>
+#include <xen/page-size.h>
+#include <xen/types.h>
+
+#define virt_to_maddr(va) ((paddr_t)((vaddr_t)(va) & PADDR_MASK))
+
+/* On MPU systems there is no translation, ma == va. */
+static inline void *maddr_to_virt(paddr_t ma)
+{
+    return _p(ma);
+}
+
+#endif /* __ARM_MPU_MM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */