@@ -5,12 +5,19 @@
* purpose area (VMAP_DEFAULT) and a livepatch-specific area (VMAP_XEN). The
* latter is used when loading livepatches and the former for everything else.
*/
-#if !defined(__XEN_VMAP_H__) && defined(VMAP_VIRT_START)
+#ifndef __XEN_VMAP_H__
#define __XEN_VMAP_H__
#include <xen/mm-frame.h>
#include <xen/page-size.h>
+/*
+ * MPU systems won't have HAS_VMAP enabled, but will provide implementations
+ * for only some of the functions of this module, so hide the declarations
+ * that are not available on systems where !HAS_VMAP.
+ */
+#ifdef CONFIG_HAS_VMAP
+
/* Identifiers for the linear ranges tracked by vmap */
enum vmap_region {
/*
@@ -68,25 +75,6 @@ void *__vmap(const mfn_t *mfn, unsigned int granularity, unsigned int nr,
*/
void *vmap(const mfn_t *mfn, unsigned int nr);
-/*
- * Maps physically contiguous pages onto the VMAP_DEFAULT vmap region
- *
- * @param mfn Base mfn of the physical region
- * @param nr Number of mfns in the physical region
- * @return Pointer to the mapped area on success; NULL otherwise.
- */
-void *vmap_contig(mfn_t mfn, unsigned int nr);
-
-/*
- * Unmaps a range of virtually contiguous memory from one of the vmap regions
- *
- * The system remembers internally how wide the mapping is and unmaps it all.
- * It also can determine the vmap region type from the `va`.
- *
- * @param va Virtual base address of the range to unmap
- */
-void vunmap(const void *va);
-
/*
* Allocate `size` octets of possibly non-contiguous physical memory and map
* them contiguously in the VMAP_DEFAULT vmap region
@@ -112,6 +100,33 @@ void *vzalloc(size_t size);
*/
void vfree(void *va);
+/* Return the number of pages in the mapping starting at address 'va' */
+unsigned int vmap_size(const void *va);
+
+/* Pointer to 1 octet past the end of the VMAP_DEFAULT virtual area */
+void *arch_vmap_virt_end(void);
+
+#endif /* CONFIG_HAS_VMAP */
+
+/*
+ * Maps physically contiguous pages onto the VMAP_DEFAULT vmap region
+ *
+ * @param mfn Base mfn of the physical region
+ * @param nr Number of mfns in the physical region
+ * @return Pointer to the mapped area on success; NULL otherwise.
+ */
+void *vmap_contig(mfn_t mfn, unsigned int nr);
+
+/*
+ * Unmaps a range of virtually contiguous memory from one of the vmap regions
+ *
+ * The system remembers internally how wide the mapping is and unmaps it all.
+ * It also can determine the vmap region type from the `va`.
+ *
+ * @param va Virtual base address of the range to unmap
+ */
+void vunmap(const void *va);
+
/*
* Analogous to vmap_contig(), but for IO memory
*
@@ -124,9 +139,6 @@ void vfree(void *va);
*/
void __iomem *ioremap(paddr_t pa, size_t len);
-/* Return the number of pages in the mapping starting at address 'va' */
-unsigned int vmap_size(const void *va);
-
/* Analogous to vunmap(), but for IO memory mapped via ioremap() */
static inline void iounmap(void __iomem *va)
{
@@ -135,9 +147,6 @@ static inline void iounmap(void __iomem *va)
vunmap((void *)(addr & PAGE_MASK));
}
-/* Pointer to 1 octet past the end of the VMAP_DEFAULT virtual area */
-void *arch_vmap_virt_end(void);
-
/* Initialises the VMAP_DEFAULT virtual range */
static inline void vm_init(void)
{
--- a/xen/include/xen/xvmalloc.h
+++ b/xen/include/xen/xvmalloc.h
@@ -40,20 +40,46 @@
((typeof(ptr))_xvrealloc(ptr, offsetof(typeof(*(ptr)), field[nr]), \
__alignof__(typeof(*(ptr)))))
+#ifdef CONFIG_HAS_VMAP
+
/* Free any of the above. */
void xvfree(void *va);
+/* Underlying functions */
+void *_xvmalloc(size_t size, unsigned int align);
+void *_xvzalloc(size_t size, unsigned int align);
+void *_xvrealloc(void *va, size_t size, unsigned int align);
+
+#else /* !CONFIG_HAS_VMAP */
+
+static inline void xvfree(void *va)
+{
+ xfree(va);
+}
+
+static inline void *_xvmalloc(size_t size, unsigned int align)
+{
+ return _xmalloc(size, align);
+}
+
+static inline void *_xvzalloc(size_t size, unsigned int align)
+{
+ return _xzalloc(size, align);
+}
+
+static inline void *_xvrealloc(void *va, size_t size, unsigned int align)
+{
+ return _xrealloc(va, size, align);
+}
+
+#endif /* CONFIG_HAS_VMAP */
+
/* Free an allocation, and zero the pointer to it. */
#define XVFREE(p) do { \
xvfree(p); \
(p) = NULL; \
} while ( false )
-/* Underlying functions */
-void *_xvmalloc(size_t size, unsigned int align);
-void *_xvzalloc(size_t size, unsigned int align);
-void *_xvrealloc(void *va, size_t size, unsigned int align);
-
static inline void *_xvmalloc_array(
size_t size, unsigned int align, unsigned long num)
{
When HAS_VMAP is disabled, the xv{malloc,zalloc,...} functions should fall
back to the simple x{malloc,zalloc,...} variants; implement that, because
MPU systems won't have virtual memory.

Additionally, remove VMAP_VIRT_START from the vmap.h header guard, since MPU
systems won't have it defined, and protect with #ifdef CONFIG_HAS_VMAP all
the declarations that won't be used in an MPU system built without HAS_VMAP.

Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
---
Changes from v1:
 - put back static inline iounmap
 - changed commit message
 - hide unused declarations for systems with !HAS_VMAP
 - correct functions declared in xvmalloc.h to be static inline
 - prefer '#ifdef' instead of '#if defined' where possible
---
 xen/include/xen/vmap.h     | 61 ++++++++++++++++++++++----------------
 xen/include/xen/xvmalloc.h | 36 ++++++++++++++++++----
 2 files changed, 66 insertions(+), 31 deletions(-)
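
For readers less familiar with the pattern, below is a minimal standalone
sketch of the config-gated static inline fallback used in the xvmalloc.h
hunk above. It is illustrative only: the demo_* names, the DEMO_HAS_VMAP
macro and the calloc()/free() backends are stand-ins, whereas the actual
patch forwards xvfree()/_xvmalloc()/_xvzalloc()/_xvrealloc() to
xfree()/_xmalloc()/_xzalloc()/_xrealloc() when CONFIG_HAS_VMAP is not set.

/* Build with -DDEMO_HAS_VMAP to get the "real" allocator declarations. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef DEMO_HAS_VMAP
/* Full implementation lives elsewhere (it needs virtual mappings). */
void *demo_xvzalloc(size_t size);
void demo_xvfree(void *va);
#else
/* No vmap support (e.g. an MPU system): fall back to the plain allocator. */
static inline void *demo_xvzalloc(size_t size)
{
    return calloc(1, size);
}

static inline void demo_xvfree(void *va)
{
    free(va);
}
#endif

int main(void)
{
    /* Call sites are identical in both configurations. */
    char *buf = demo_xvzalloc(64);

    if ( !buf )
        return 1;

    strcpy(buf, "same caller, different backing allocator");
    printf("%s\n", buf);
    demo_xvfree(buf);

    return 0;
}

Keeping the fallbacks as static inlines in the header (rather than
out-of-line definitions) avoids adding a new translation unit and lets the
compiler fold the wrapper away, which matches the v1 feedback noted above
("correct functions declared in xvmalloc.h to be static inline").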