
[v4,1/5] xen/domctl, tools: Introduce a new domctl to get guest memory map

Message ID 20240409045357.236802-2-xin.wang2@amd.com (mailing list archive)
State New, archived
Series DOMCTL-based guest magic region allocation for 1:1 domUs

Commit Message

Henry Wang April 9, 2024, 4:53 a.m. UTC
There are use cases where the toolstack needs to know the guest
memory map. For example, the toolstack helper application
"init-dom0less" needs to know the guest magic page region of 1:1
direct-mapped dom0less DomUs in order to allocate the magic pages.

To address such needs, add a XEN_DOMCTL_get_mem_map hypercall and
related data structures to query the hypervisor for the guest memory
map. The guest memory map is recorded in the domain structure;
currently only the guest magic page region is recorded in it. The
guest magic page region is initialized at domain creation time to the
layout defined in the public header, and is updated for 1:1 dom0less
DomUs (see the following commit) to avoid conflicts with RAM.

Take the opportunity to drop an unnecessary empty line to keep the
coding style consistent in the file.

Reported-by: Alec Kwapis <alec.kwapis@medtronic.com>
Signed-off-by: Henry Wang <xin.wang2@amd.com>
---
RFC: The newly introduced "struct xen_domctl_mem_map" largely
duplicates "struct xen_memory_map"; any comments on reusing
"struct xen_memory_map" for simplicity?
v4:
- Drop the unnecessary initialization and printk.
- Use XEN_* prefix instead of GUEST_* for domctl.
- Move the mem region type to mem region structure.
- Drop the check of Xen internal state in the domctl.
- Handle the nr_regions properly (fill only as much of the array
  as there is space for, but return the full count to the caller)
  so the caller can tell whether it specified too small an array.
v3:
- Init the return rc for XEN_DOMCTL_get_mem_map.
- Copy the nr_mem_regions back as it should be both IN & OUT.
- Check if mem_map->nr_mem_regions exceeds the XEN_MAX_MEM_REGIONS
  when adding a new entry.
- Allow XEN_MAX_MEM_REGIONS to be different between different archs.
- Add explicit padding and check to the domctl structures.
v2:
- New patch
---
 tools/include/xenctrl.h           |  4 ++++
 tools/libs/ctrl/xc_domain.c       | 37 +++++++++++++++++++++++++++++++
 xen/arch/arm/domain.c             | 14 ++++++++++++
 xen/arch/arm/domctl.c             | 28 ++++++++++++++++++++++-
 xen/arch/arm/include/asm/domain.h |  8 +++++++
 xen/include/public/arch-arm.h     |  7 ++++++
 xen/include/public/domctl.h       | 29 ++++++++++++++++++++++++
 7 files changed, 126 insertions(+), 1 deletion(-)

Comments

Jan Beulich April 18, 2024, 12:37 p.m. UTC | #1
On 09.04.2024 06:53, Henry Wang wrote:
> --- a/tools/libs/ctrl/xc_domain.c
> +++ b/tools/libs/ctrl/xc_domain.c
> @@ -697,6 +697,43 @@ int xc_domain_setmaxmem(xc_interface *xch,
>      return do_domctl(xch, &domctl);
>  }
>  
> +int xc_get_domain_mem_map(xc_interface *xch, uint32_t domid,
> +                          struct xen_mem_region mem_regions[],
> +                          uint32_t *nr_regions)
> +{
> +    int rc;
> +    uint32_t nr = *nr_regions;
> +    struct xen_domctl domctl = {
> +        .cmd         = XEN_DOMCTL_get_mem_map,
> +        .domain      = domid,
> +        .u.mem_map = {
> +            .nr_mem_regions = nr,
> +        },
> +    };
> +
> +    DECLARE_HYPERCALL_BOUNCE(mem_regions, sizeof(xen_mem_region_t) * nr,
> +                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
> +
> +    if ( !mem_regions || xc_hypercall_bounce_pre(xch, mem_regions) || nr < 1 )

Why the nr < 1 part? For a caller to size the necessary buffer, it may want
to pass in 0 (and a NULL buffer pointer) first.
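
(A caller-side sketch of that sizing pattern, assuming the wrapper is
relaxed accordingly to permit the NULL-buffer probe; fetch_mem_map()
is a made-up name:)

#include <stdlib.h>
#include <xenctrl.h>

/* Hypothetical: probe with a NULL buffer and nr = 0 to learn the region
 * count, then call again with a buffer of exactly that size. */
static struct xen_mem_region *fetch_mem_map(xc_interface *xch,
                                            uint32_t domid, uint32_t *nr)
{
    struct xen_mem_region *regions;

    *nr = 0;
    xc_get_domain_mem_map(xch, domid, NULL, nr); /* probe for the count */
    if ( !*nr )
        return NULL;

    regions = calloc(*nr, sizeof(*regions));
    if ( regions && xc_get_domain_mem_map(xch, domid, regions, nr) )
    {
        free(regions);
        regions = NULL;
    }
    return regions;
}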

> @@ -176,6 +175,33 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>  
>          return rc;
>      }
> +    case XEN_DOMCTL_get_mem_map:
> +    {
> +        int rc = 0;
> +        uint32_t nr_regions;

unsigned int (see ./CODING_STYLE)?

> --- a/xen/include/public/arch-arm.h
> +++ b/xen/include/public/arch-arm.h
> @@ -223,6 +223,13 @@ typedef uint64_t xen_pfn_t;
>   */
>  #define XEN_LEGACY_MAX_VCPUS 1
>  
> +/*
> + * Maximum number of memory map regions for guest memory layout.
> + * Used by XEN_DOMCTL_get_mem_map, currently there is only one region
> + * for the guest magic pages.
> + */
> +#define XEN_MAX_MEM_REGIONS 1

Why is this in the public header? I can only find Xen-internal uses.

> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -946,6 +946,33 @@ struct xen_domctl_paging_mempool {
>      uint64_aligned_t size; /* Size in bytes. */
>  };
>  
> +#ifndef XEN_MAX_MEM_REGIONS
> +#define XEN_MAX_MEM_REGIONS 1
> +#endif
> +
> +struct xen_mem_region {
> +    uint64_aligned_t start;
> +    uint64_aligned_t size;
> +#define XEN_MEM_REGION_DEFAULT    0

I can't spot any use of this. What's its purpose?

> +#define XEN_MEM_REGION_MAGIC      1
> +    uint32_t         type;
> +    /* Must be zero */
> +    uint32_t         pad;

This being OUT only, I don't think the comment makes sense. I'd omit it
completely; if you absolutely want one, please say "will" instead of "must".

Jan
Henry Wang April 19, 2024, 2:27 a.m. UTC | #2
Hi Jan,

On 4/18/2024 8:37 PM, Jan Beulich wrote:
> On 09.04.2024 06:53, Henry Wang wrote:
>> --- a/tools/libs/ctrl/xc_domain.c
>> +++ b/tools/libs/ctrl/xc_domain.c
>> @@ -697,6 +697,43 @@ int xc_domain_setmaxmem(xc_interface *xch,
>>       return do_domctl(xch, &domctl);
>>   }
>>   
>> +int xc_get_domain_mem_map(xc_interface *xch, uint32_t domid,
>> +                          struct xen_mem_region mem_regions[],
>> +                          uint32_t *nr_regions)
>> +{
>> +    int rc;
>> +    uint32_t nr = *nr_regions;
>> +    struct xen_domctl domctl = {
>> +        .cmd         = XEN_DOMCTL_get_mem_map,
>> +        .domain      = domid,
>> +        .u.mem_map = {
>> +            .nr_mem_regions = nr,
>> +        },
>> +    };
>> +
>> +    DECLARE_HYPERCALL_BOUNCE(mem_regions, sizeof(xen_mem_region_t) * nr,
>> +                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
>> +
>> +    if ( !mem_regions || xc_hypercall_bounce_pre(xch, mem_regions) || nr < 1 )
> Why the nr < 1 part? For a caller to size the necessary buffer, it may want
> to pass in 0 (and a NULL buffer pointer) first.

I will drop this nr < 1 part.

>> @@ -176,6 +175,33 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>>   
>>           return rc;
>>       }
>> +    case XEN_DOMCTL_get_mem_map:
>> +    {
>> +        int rc = 0;
>> +        uint32_t nr_regions;
> unsigned int (see ./CODING_STYLE)?

Ok, I will use unsigned int.

>> --- a/xen/include/public/arch-arm.h
>> +++ b/xen/include/public/arch-arm.h
>> @@ -223,6 +223,13 @@ typedef uint64_t xen_pfn_t;
>>    */
>>   #define XEN_LEGACY_MAX_VCPUS 1
>>   
>> +/*
>> + * Maximum number of memory map regions for guest memory layout.
>> + * Used by XEN_DOMCTL_get_mem_map, currently there is only one region
>> + * for the guest magic pages.
>> + */
>> +#define XEN_MAX_MEM_REGIONS 1
> Why is this in the public header? I can only find Xen-internal uses.

It will also be used in the init-dom0less app, which is on the toolstack side.

>> --- a/xen/include/public/domctl.h
>> +++ b/xen/include/public/domctl.h
>> @@ -946,6 +946,33 @@ struct xen_domctl_paging_mempool {
>>       uint64_aligned_t size; /* Size in bytes. */
>>   };
>>   
>> +#ifndef XEN_MAX_MEM_REGIONS
>> +#define XEN_MAX_MEM_REGIONS 1
>> +#endif
>> +
>> +struct xen_mem_region {
>> +    uint64_aligned_t start;
>> +    uint64_aligned_t size;
>> +#define XEN_MEM_REGION_DEFAULT    0
> I can't spot any use of this. What's its purpose?

I can drop it. My original intention was to define a default type, 
since struct arch_domain is zalloc-ed.

>> +#define XEN_MEM_REGION_MAGIC      1
>> +    uint32_t         type;
>> +    /* Must be zero */
>> +    uint32_t         pad;
> This being OUT only, I don't think the comment makes sense. I'd omit it
> completely; if you absolutely want one, please say "will" instead of "must".

Sure, I will follow your suggestion. Thanks.

Kind regards,
Henry


>
> Jan
Jan Beulich April 19, 2024, 6:16 a.m. UTC | #3
On 19.04.2024 04:27, Henry Wang wrote:
> On 4/18/2024 8:37 PM, Jan Beulich wrote:
>> On 09.04.2024 06:53, Henry Wang wrote:
>>> --- a/xen/include/public/arch-arm.h
>>> +++ b/xen/include/public/arch-arm.h
>>> @@ -223,6 +223,13 @@ typedef uint64_t xen_pfn_t;
>>>    */
>>>   #define XEN_LEGACY_MAX_VCPUS 1
>>>   
>>> +/*
>>> + * Maximum number of memory map regions for guest memory layout.
>>> + * Used by XEN_DOMCTL_get_mem_map, currently there is only one region
>>> + * for the guest magic pages.
>>> + */
>>> +#define XEN_MAX_MEM_REGIONS 1
>> Why is this in the public header? I can only find Xen-internal uses.
> 
> It will also be used in the init-dom0less app, which is on the toolstack side.

I've looked there. It's only a convenience to use it there. Imo you want to
do the buffer sizing dynamically (utilizing the change to the hypercall
implementation that I talked you into) and drop this constant from the
public interface.

Jan
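
(Taking both points together -- dynamic buffer sizing plus dropping the
constant from the public interface -- the init-dom0less side could look
roughly like this sketch; find_magic_region() is a made-up name:)

#include <stdlib.h>
#include <xenctrl.h>

/* Hypothetical: locate the guest magic region without relying on
 * XEN_MAX_MEM_REGIONS, using a probe call to size the buffer. */
static int find_magic_region(xc_interface *xch, uint32_t domid,
                             struct xen_mem_region *out)
{
    uint32_t i, nr = 0;
    struct xen_mem_region *regions;
    int rc = -1;

    xc_get_domain_mem_map(xch, domid, NULL, &nr); /* probe for the count */
    if ( !nr || (regions = calloc(nr, sizeof(*regions))) == NULL )
        return -1;

    if ( !xc_get_domain_mem_map(xch, domid, regions, &nr) )
        for ( i = 0; i < nr; i++ )
            if ( regions[i].type == XEN_MEM_REGION_MAGIC )
            {
                *out = regions[i];
                rc = 0;
                break;
            }

    free(regions);
    return rc;
}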

Patch

diff --git a/tools/include/xenctrl.h b/tools/include/xenctrl.h
index 2ef8b4e054..b25e9772a2 100644
--- a/tools/include/xenctrl.h
+++ b/tools/include/xenctrl.h
@@ -1195,6 +1195,10 @@  int xc_domain_setmaxmem(xc_interface *xch,
                         uint32_t domid,
                         uint64_t max_memkb);
 
+int xc_get_domain_mem_map(xc_interface *xch, uint32_t domid,
+                          struct xen_mem_region mem_regions[],
+                          uint32_t *nr_regions);
+
 int xc_domain_set_memmap_limit(xc_interface *xch,
                                uint32_t domid,
                                unsigned long map_limitkb);
diff --git a/tools/libs/ctrl/xc_domain.c b/tools/libs/ctrl/xc_domain.c
index f2d9d14b4d..4dba55d01d 100644
--- a/tools/libs/ctrl/xc_domain.c
+++ b/tools/libs/ctrl/xc_domain.c
@@ -697,6 +697,43 @@  int xc_domain_setmaxmem(xc_interface *xch,
     return do_domctl(xch, &domctl);
 }
 
+int xc_get_domain_mem_map(xc_interface *xch, uint32_t domid,
+                          struct xen_mem_region mem_regions[],
+                          uint32_t *nr_regions)
+{
+    int rc;
+    uint32_t nr = *nr_regions;
+    struct xen_domctl domctl = {
+        .cmd         = XEN_DOMCTL_get_mem_map,
+        .domain      = domid,
+        .u.mem_map = {
+            .nr_mem_regions = nr,
+        },
+    };
+
+    DECLARE_HYPERCALL_BOUNCE(mem_regions, sizeof(xen_mem_region_t) * nr,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( !mem_regions || xc_hypercall_bounce_pre(xch, mem_regions) || nr < 1 )
+        return -1;
+
+    set_xen_guest_handle(domctl.u.mem_map.buffer, mem_regions);
+
+    rc = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, mem_regions);
+
+    if ( nr < domctl.u.mem_map.nr_mem_regions )
+    {
+        PERROR("Too small nr_regions %u", nr);
+        return -1;
+    }
+
+    *nr_regions = domctl.u.mem_map.nr_mem_regions;
+
+    return rc;
+}
+
 #if defined(__i386__) || defined(__x86_64__)
 int xc_domain_set_memory_map(xc_interface *xch,
                                uint32_t domid,
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 34cbfe699a..0c9761b65b 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -697,6 +697,7 @@  int arch_domain_create(struct domain *d,
 {
     unsigned int count = 0;
     int rc;
+    struct mem_map_domain *mem_map = &d->arch.mem_map;
 
     BUILD_BUG_ON(GUEST_MAX_VCPUS < MAX_VIRT_CPUS);
 
@@ -786,6 +787,19 @@  int arch_domain_create(struct domain *d,
     d->arch.sve_vl = config->arch.sve_vl;
 #endif
 
+    if ( mem_map->nr_mem_regions < XEN_MAX_MEM_REGIONS )
+    {
+        mem_map->regions[mem_map->nr_mem_regions].start = GUEST_MAGIC_BASE;
+        mem_map->regions[mem_map->nr_mem_regions].size = GUEST_MAGIC_SIZE;
+        mem_map->regions[mem_map->nr_mem_regions].type = XEN_MEM_REGION_MAGIC;
+        mem_map->nr_mem_regions++;
+    }
+    else
+    {
+        rc = -ENOSPC;
+        goto fail;
+    }
+
     return 0;
 
 fail:
diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index ad56efb0f5..8f62719cfa 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -148,7 +148,6 @@  long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
 
         return 0;
     }
-
     case XEN_DOMCTL_vuart_op:
     {
         int rc;
@@ -176,6 +175,33 @@  long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
 
         return rc;
     }
+    case XEN_DOMCTL_get_mem_map:
+    {
+        int rc = 0;
+        uint32_t nr_regions;
+
+        if ( domctl->u.mem_map.pad )
+            return -EINVAL;
+
+        /*
+         * Fill the buffer only as much of the array as there is space for,
+         * but always return the full count in the hypervisor to the caller.
+         * This way we can avoid overflowing the buffer and also make sure
+         * the caller can know if it specifies too small an array.
+         */
+        nr_regions = min(d->arch.mem_map.nr_mem_regions,
+                         domctl->u.mem_map.nr_mem_regions);
+
+        domctl->u.mem_map.nr_mem_regions = d->arch.mem_map.nr_mem_regions;
+
+        if ( copy_to_guest(domctl->u.mem_map.buffer,
+                           d->arch.mem_map.regions, nr_regions) ||
+             __copy_field_to_guest(u_domctl, domctl,
+                                   u.mem_map.nr_mem_regions) )
+            rc = -EFAULT;
+
+        return rc;
+    }
     default:
         return subarch_do_domctl(domctl, d, u_domctl);
     }
diff --git a/xen/arch/arm/include/asm/domain.h b/xen/arch/arm/include/asm/domain.h
index f1d72c6e48..a559a9e499 100644
--- a/xen/arch/arm/include/asm/domain.h
+++ b/xen/arch/arm/include/asm/domain.h
@@ -10,6 +10,7 @@ 
 #include <asm/gic.h>
 #include <asm/vgic.h>
 #include <asm/vpl011.h>
+#include <public/domctl.h>
 #include <public/hvm/params.h>
 
 struct hvm_domain
@@ -59,6 +60,11 @@  struct paging_domain {
     unsigned long p2m_total_pages;
 };
 
+struct mem_map_domain {
+    unsigned int nr_mem_regions;
+    struct xen_mem_region regions[XEN_MAX_MEM_REGIONS];
+};
+
 struct arch_domain
 {
 #ifdef CONFIG_ARM_64
@@ -77,6 +83,8 @@  struct arch_domain
 
     struct paging_domain paging;
 
+    struct mem_map_domain mem_map;
+
     struct vmmio vmmio;
 
     /* Continuable domain_relinquish_resources(). */
diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
index e167e14f8d..eba61e1ac6 100644
--- a/xen/include/public/arch-arm.h
+++ b/xen/include/public/arch-arm.h
@@ -223,6 +223,13 @@  typedef uint64_t xen_pfn_t;
  */
 #define XEN_LEGACY_MAX_VCPUS 1
 
+/*
+ * Maximum number of memory map regions for guest memory layout.
+ * Used by XEN_DOMCTL_get_mem_map, currently there is only one region
+ * for the guest magic pages.
+ */
+#define XEN_MAX_MEM_REGIONS 1
+
 typedef uint64_t xen_ulong_t;
 #define PRI_xen_ulong PRIx64
 
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index a33f9ec32b..974c07ee61 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -946,6 +946,33 @@  struct xen_domctl_paging_mempool {
     uint64_aligned_t size; /* Size in bytes. */
 };
 
+#ifndef XEN_MAX_MEM_REGIONS
+#define XEN_MAX_MEM_REGIONS 1
+#endif
+
+struct xen_mem_region {
+    uint64_aligned_t start;
+    uint64_aligned_t size;
+#define XEN_MEM_REGION_DEFAULT    0
+#define XEN_MEM_REGION_MAGIC      1
+    uint32_t         type;
+    /* Must be zero */
+    uint32_t         pad;
+};
+typedef struct xen_mem_region xen_mem_region_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_region_t);
+
+struct xen_domctl_mem_map {
+    /* IN & OUT */
+    uint32_t         nr_mem_regions;
+    /* Must be zero */
+    uint32_t         pad;
+    /* OUT */
+    XEN_GUEST_HANDLE_64(xen_mem_region_t) buffer;
+};
+typedef struct xen_domctl_mem_map xen_domctl_mem_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_map_t);
+
 #if defined(__i386__) || defined(__x86_64__)
 struct xen_domctl_vcpu_msr {
     uint32_t         index;
@@ -1277,6 +1304,7 @@  struct xen_domctl {
 #define XEN_DOMCTL_vmtrace_op                    84
 #define XEN_DOMCTL_get_paging_mempool_size       85
 #define XEN_DOMCTL_set_paging_mempool_size       86
+#define XEN_DOMCTL_get_mem_map                   87
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1339,6 +1367,7 @@  struct xen_domctl {
         struct xen_domctl_vuart_op          vuart_op;
         struct xen_domctl_vmtrace_op        vmtrace_op;
         struct xen_domctl_paging_mempool    paging_mempool;
+        struct xen_domctl_mem_map           mem_map;
         uint8_t                             pad[128];
     } u;
 };