
[v2,2/5] xen: Provide XEN_DMOP_add_to_physmap

Message ID 20171023090509.4338-3-ross.lagerwall@citrix.com (mailing list archive)
State New, archived

Commit Message

Ross Lagerwall Oct. 23, 2017, 9:05 a.m. UTC
Provide XEN_DMOP_add_to_physmap, a limited version of
XENMEM_add_to_physmap to allow a deprivileged QEMU to move VRAM when a
guest programs its BAR. It is equivalent to XENMEM_add_to_physmap with
space == XENMAPSPACE_gmfn_range.

Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
---

Changed in v2:
* Make it operate on a range.

 xen/arch/x86/hvm/dm.c          | 31 +++++++++++++++++++++++++++++++
 xen/include/public/hvm/dm_op.h | 17 +++++++++++++++++
 xen/include/xlat.lst           |  1 +
 3 files changed, 49 insertions(+)
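
For a sense of how this is reached from userspace: the device model fills in a
struct xen_dm_op and hands it to Xen through the dm_op hypercall buffers. A
minimal caller-side sketch follows, assuming the structure as posted;
dm_op_issue() is a hypothetical stand-in for the real libxendevicemodel
plumbing, not an API this series adds:

    #include <stddef.h>
    #include <stdint.h>
    #include <xen/hvm/dm_op.h> /* struct xen_dm_op, XEN_DMOP_add_to_physmap */

    /* Hypothetical wrapper: wraps 'op' in a dm_op buffer and issues the
     * hypercall against the given domain. */
    int dm_op_issue(uint16_t domid, struct xen_dm_op *op, size_t size);

    static int move_vram(uint16_t domid, uint64_t src_gfn,
                         uint64_t dst_gfn, uint16_t nr_frames)
    {
        struct xen_dm_op op = {
            .op = XEN_DMOP_add_to_physmap,
            .u.add_to_physmap = {
                .size = nr_frames, /* number of GMFNs to move */
                .idx  = src_gfn,   /* where the VRAM frames live now */
                .gpfn = dst_gfn,   /* where the guest's BAR now points */
            },
        };
        /* Unnamed fields, including pad0/pad1, are zeroed by the
         * initializer, which the hypervisor-side handler requires. */
        return dm_op_issue(domid, &op, sizeof(op));
    }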

Comments

Paul Durrant Oct. 23, 2017, 12:03 p.m. UTC | #1
> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
> Ross Lagerwall
> Sent: 23 October 2017 10:05
> To: xen-devel@lists.xen.org
> Cc: Stefano Stabellini <sstabellini@kernel.org>; Wei Liu
> <wei.liu2@citrix.com>; Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>;
> George Dunlap <George.Dunlap@citrix.com>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>; Tim
> (Xen.org) <tim@xen.org>; Ross Lagerwall <ross.lagerwall@citrix.com>; Jan
> Beulich <jbeulich@suse.com>
> Subject: [Xen-devel] [PATCH v2 2/5] xen: Provide XEN_DMOP_add_to_physmap
>
> Provide XEN_DMOP_add_to_physmap, a limited version of
> XENMEM_add_to_physmap to allow a deprivileged QEMU to move VRAM when a
> guest programs its BAR. It is equivalent to XENMEM_add_to_physmap with
> space == XENMAPSPACE_gmfn_range.
>
> Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

...with one observation below...

> ---
>
> Changed in v2:
> * Make it operate on a range.
>
>  xen/arch/x86/hvm/dm.c          | 31 +++++++++++++++++++++++++++++++
>  xen/include/public/hvm/dm_op.h | 17 +++++++++++++++++
>  xen/include/xlat.lst           |  1 +
>  3 files changed, 49 insertions(+)
>
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> index 32ade95..0027567 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -640,6 +640,36 @@ static int dm_op(const struct dmop_args *op_args)
>          break;
>      }
>
> +    case XEN_DMOP_add_to_physmap:
> +    {
> +        struct xen_dm_op_add_to_physmap *data =
> +            &op.u.add_to_physmap;
> +        struct xen_add_to_physmap xatp = {
> +            .domid = op_args->domid,
> +            .size = data->size,
> +            .space = XENMAPSPACE_gmfn_range,
> +            .idx = data->idx,
> +            .gpfn = data->gpfn,
> +        };
> +
> +        if ( data->pad0 || data->pad1 )
> +        {
> +            rc = -EINVAL;
> +            break;
> +        }
> +
> +        rc = xenmem_add_to_physmap(d, &xatp, 0);
> +        if ( rc > 0 )
> +        {
> +            data->size -= rc;
> +            data->idx += rc;
> +            data->gpfn += rc;
> +            const_op = false;
> +            rc = -ERESTART;
> +        }
> +        break;
> +    }
> +
>      default:
>          rc = -EOPNOTSUPP;
>          break;
> @@ -669,6 +699,7 @@ CHECK_dm_op_set_mem_type;
>  CHECK_dm_op_inject_event;
>  CHECK_dm_op_inject_msi;
>  CHECK_dm_op_remote_shutdown;
> +CHECK_dm_op_add_to_physmap;
>
>  int compat_dm_op(domid_t domid,
>                   unsigned int nr_bufs,
> diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
> index e173085..f685110 100644
> --- a/xen/include/public/hvm/dm_op.h
> +++ b/xen/include/public/hvm/dm_op.h
> @@ -368,6 +368,22 @@ struct xen_dm_op_remote_shutdown {
>                             /* (Other reason values are not blocked) */
>  };
>
> +/*
> + * XEN_DMOP_add_to_physmap : Sets the GPFNs at which a page range appears in
> + *                           the specified guest's pseudophysical address
> + *                           space. Identical to XENMEM_add_to_physmap with
> + *                           space == XENMAPSPACE_gmfn_range.
> + */
> +#define XEN_DMOP_add_to_physmap 17
> +
> +struct xen_dm_op_add_to_physmap {
> +    uint16_t size;         /* Number of GMFNs to process. */
> +    uint16_t pad0;
> +    uint32_t pad1;

I think you can lose pad1 by putting idx and gpfn above size rather than
below (since IIRC we only need pad up to the next 4 byte boundary).

  Paul

> +    uint64_aligned_t idx;  /* Index into GMFN space. */
> +    uint64_aligned_t gpfn; /* Starting GPFN where the GMFNs should appear. */
> +};
> +
>  struct xen_dm_op {
>      uint32_t op;
>      uint32_t pad;
> @@ -389,6 +405,7 @@ struct xen_dm_op {
>          struct xen_dm_op_map_mem_type_to_ioreq_server
>                  map_mem_type_to_ioreq_server;
>          struct xen_dm_op_remote_shutdown remote_shutdown;
> +        struct xen_dm_op_add_to_physmap add_to_physmap;
>      } u;
>  };
>
> diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
> index 4346cbe..d40bac6 100644
> --- a/xen/include/xlat.lst
> +++ b/xen/include/xlat.lst
> @@ -57,6 +57,7 @@
>  ?	grant_entry_v2			grant_table.h
>  ?	gnttab_swap_grant_ref		grant_table.h
>  !	dm_op_buf			hvm/dm_op.h
> +?	dm_op_add_to_physmap		hvm/dm_op.h
>  ?	dm_op_create_ioreq_server	hvm/dm_op.h
>  ?	dm_op_destroy_ioreq_server	hvm/dm_op.h
>  ?	dm_op_get_ioreq_server_info	hvm/dm_op.h
> --
> 2.9.5

Jan Beulich Oct. 23, 2017, 12:18 p.m. UTC | #2
>>> On 23.10.17 at 14:03, <Paul.Durrant@citrix.com> wrote:
>> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
>> Ross Lagerwall
>> Sent: 23 October 2017 10:05
>> --- a/xen/include/public/hvm/dm_op.h
>> +++ b/xen/include/public/hvm/dm_op.h
>> @@ -368,6 +368,22 @@ struct xen_dm_op_remote_shutdown {
>>                             /* (Other reason values are not blocked) */
>>  };
>> 
>> +/*
>> + * XEN_DMOP_add_to_physmap : Sets the GPFNs at which a page range appears in
>> + *                           the specified guest's pseudophysical address
>> + *                           space. Identical to XENMEM_add_to_physmap with
>> + *                           space == XENMAPSPACE_gmfn_range.
>> + */
>> +#define XEN_DMOP_add_to_physmap 17
>> +
>> +struct xen_dm_op_add_to_physmap {
>> +    uint16_t size;         /* Number of GMFNs to process. */
>> +    uint16_t pad0;
>> +    uint32_t pad1;
> 
> I think you can lose pad1 by putting idx and gpfn above size rather than 
> below (since IIRC we only need pad up to the next 4 byte boundary).

No, tail padding would then still be wanted, I think.

Jan
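
Jan's point can be checked mechanically: on an ABI where the 64-bit fields
are 8-byte aligned, hoisting idx and gpfn above size merely turns the
explicit leading padding into implicit tail padding, and the struct stays
24 bytes either way. A standalone illustration (plain uint64_t standing in
for uint64_aligned_t, 64-bit ABI assumed):

    #include <stdint.h>

    struct layout_as_posted {   /* explicit padding up front */
        uint16_t size;
        uint16_t pad0;
        uint32_t pad1;
        uint64_t idx;
        uint64_t gpfn;
    };

    struct layout_reordered {   /* 64-bit fields hoisted above size */
        uint64_t idx;
        uint64_t gpfn;
        uint16_t size;
        /* 6 bytes of implicit tail padding end up here */
    };

    _Static_assert(sizeof(struct layout_as_posted) == 24, "24 bytes");
    _Static_assert(sizeof(struct layout_reordered) == 24, "still 24 bytes");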
Ross Lagerwall Oct. 23, 2017, 12:18 p.m. UTC | #3
On 10/23/2017 01:03 PM, Paul Durrant wrote:
[snip]
>> +/*
>> + * XEN_DMOP_add_to_physmap : Sets the GPFNs at which a page range appears in
>> + *                           the specified guest's pseudophysical address
>> + *                           space. Identical to XENMEM_add_to_physmap with
>> + *                           space == XENMAPSPACE_gmfn_range.
>> + */
>> +#define XEN_DMOP_add_to_physmap 17
>> +
>> +struct xen_dm_op_add_to_physmap {
>> +    uint16_t size;         /* Number of GMFNs to process. */
>> +    uint16_t pad0;
>> +    uint32_t pad1;
> 
> I think you can lose pad1 by putting idx and gpfn above size rather than below (since IIRC we only need pad up to the next 4 byte boundary).
> 
Nope, the build fails unless I pad it to an 8 byte boundary. This is 
also why I added padding to struct xen_dm_op_pin_memory_cacheattr...
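
The failure Ross mentions comes from the compat-checking machinery: the "?"
line added to xlat.lst makes the build generate CHECK_dm_op_add_to_physmap
(used in dm.c above), which requires the native and 32-bit compat views of
the structure to have matching sizes and field offsets. A simplified sketch
of the kind of check involved, with stand-in struct names rather than the
generated ones:

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-ins for the native and generated compat definitions. */
    struct xen_view    { uint16_t size, pad0; uint32_t pad1;
                         uint64_t idx, gpfn; };
    struct compat_view { uint16_t size, pad0; uint32_t pad1;
                         uint64_t idx, gpfn; };

    #define CHECK_FIELD(f)                                          \
        _Static_assert(offsetof(struct xen_view, f) ==              \
                       offsetof(struct compat_view, f),             \
                       "offset of " #f " must match across ABIs")

    CHECK_FIELD(size);
    CHECK_FIELD(idx);
    CHECK_FIELD(gpfn);
    _Static_assert(sizeof(struct xen_view) == sizeof(struct compat_view),
                   "overall size must match");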
Paul Durrant Oct. 23, 2017, 12:19 p.m. UTC | #4
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 23 October 2017 13:18
> To: Paul Durrant <Paul.Durrant@citrix.com>
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; George Dunlap
> <George.Dunlap@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>; Ross
> Lagerwall <ross.lagerwall@citrix.com>; Wei Liu <wei.liu2@citrix.com>;
> Stefano Stabellini <sstabellini@kernel.org>; xen-devel@lists.xen.org; Konrad
> Rzeszutek Wilk <konrad.wilk@oracle.com>; Tim (Xen.org) <tim@xen.org>
> Subject: RE: [Xen-devel] [PATCH v2 2/5] xen: Provide XEN_DMOP_add_to_physmap
> 
> >>> On 23.10.17 at 14:03, <Paul.Durrant@citrix.com> wrote:
> >> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
> >> Ross Lagerwall
> >> Sent: 23 October 2017 10:05
> >> --- a/xen/include/public/hvm/dm_op.h
> >> +++ b/xen/include/public/hvm/dm_op.h
> >> @@ -368,6 +368,22 @@ struct xen_dm_op_remote_shutdown {
> >>                             /* (Other reason values are not blocked) */
> >>  };
> >>
> >> +/*
> >> + * XEN_DMOP_add_to_physmap : Sets the GPFNs at which a page range appears in
> >> + *                           the specified guest's pseudophysical address
> >> + *                           space. Identical to XENMEM_add_to_physmap with
> >> + *                           space == XENMAPSPACE_gmfn_range.
> >> + */
> >> +#define XEN_DMOP_add_to_physmap 17
> >> +
> >> +struct xen_dm_op_add_to_physmap {
> >> +    uint16_t size;         /* Number of GMFNs to process. */
> >> +    uint16_t pad0;
> >> +    uint32_t pad1;
> >
> > I think you can lose pad1 by putting idx and gpfn above size rather than
> > below (since IIRC we only need pad up to the next 4 byte boundary).
> 
> No, tail padding would then still be wanted, I think.

Ok.  I stand corrected :-)

  Paul

> 
> Jan
Jan Beulich Nov. 21, 2017, 4:28 p.m. UTC | #5
>>> On 23.10.17 at 11:05, <ross.lagerwall@citrix.com> wrote:

First of all, instead of xen: please consider using something more
specific, like x86/hvm:.

> --- a/xen/include/public/hvm/dm_op.h
> +++ b/xen/include/public/hvm/dm_op.h
> @@ -368,6 +368,22 @@ struct xen_dm_op_remote_shutdown {
>                             /* (Other reason values are not blocked) */
>  };
>  
> +/*
> + * XEN_DMOP_add_to_physmap : Sets the GPFNs at which a page range appears in
> + *                           the specified guest's pseudophysical address

Talking of "pseudophysical" is at least confusing for HVM guests. So
far it was my understanding that such exists for PV guests only.

> + *                           space. Identical to XENMEM_add_to_physmap with
> + *                           space == XENMAPSPACE_gmfn_range.
> + */
> +#define XEN_DMOP_add_to_physmap 17
> +
> +struct xen_dm_op_add_to_physmap {
> +    uint16_t size;         /* Number of GMFNs to process. */

Why limit this to 16 bits?

> +    uint16_t pad0;
> +    uint32_t pad1;
> +    uint64_aligned_t idx;  /* Index into GMFN space. */

Why would you call this "idx"? The other interface and its naming
should have no significance here. So perhaps "src_gfn" and ...

> +    uint64_aligned_t gpfn; /* Starting GPFN where the GMFNs should appear. */

... "dst_gfn"?

Jan
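
Taken together, these comments point at a layout along the following lines
for a later revision (hypothetical; shown only to make the suggestions
concrete, not taken from a posted v3):

    struct xen_dm_op_add_to_physmap {
        uint32_t size;             /* Number of GFNs to process. */
        uint32_t pad;              /* Must be zero. */
        uint64_aligned_t src_gfn;  /* Starting GFN to relocate. */
        uint64_aligned_t dst_gfn;  /* Starting GFN where they should appear. */
    };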

Patch

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 32ade95..0027567 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -640,6 +640,36 @@ static int dm_op(const struct dmop_args *op_args)
         break;
     }
 
+    case XEN_DMOP_add_to_physmap:
+    {
+        struct xen_dm_op_add_to_physmap *data =
+            &op.u.add_to_physmap;
+        struct xen_add_to_physmap xatp = {
+            .domid = op_args->domid,
+            .size = data->size,
+            .space = XENMAPSPACE_gmfn_range,
+            .idx = data->idx,
+            .gpfn = data->gpfn,
+        };
+
+        if ( data->pad0 || data->pad1 )
+        {
+            rc = -EINVAL;
+            break;
+        }
+
+        rc = xenmem_add_to_physmap(d, &xatp, 0);
+        if ( rc > 0 )
+        {
+            data->size -= rc;
+            data->idx += rc;
+            data->gpfn += rc;
+            const_op = false;
+            rc = -ERESTART;
+        }
+        break;
+    }
+
     default:
         rc = -EOPNOTSUPP;
         break;
@@ -669,6 +699,7 @@ CHECK_dm_op_set_mem_type;
 CHECK_dm_op_inject_event;
 CHECK_dm_op_inject_msi;
 CHECK_dm_op_remote_shutdown;
+CHECK_dm_op_add_to_physmap;
 
 int compat_dm_op(domid_t domid,
                  unsigned int nr_bufs,
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index e173085..f685110 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -368,6 +368,22 @@ struct xen_dm_op_remote_shutdown {
                            /* (Other reason values are not blocked) */
 };
 
+/*
+ * XEN_DMOP_add_to_physmap : Sets the GPFNs at which a page range appears in
+ *                           the specified guest's pseudophysical address
+ *                           space. Identical to XENMEM_add_to_physmap with
+ *                           space == XENMAPSPACE_gmfn_range.
+ */
+#define XEN_DMOP_add_to_physmap 17
+
+struct xen_dm_op_add_to_physmap {
+    uint16_t size;         /* Number of GMFNs to process. */
+    uint16_t pad0;
+    uint32_t pad1;
+    uint64_aligned_t idx;  /* Index into GMFN space. */
+    uint64_aligned_t gpfn; /* Starting GPFN where the GMFNs should appear. */
+};
+
 struct xen_dm_op {
     uint32_t op;
     uint32_t pad;
@@ -389,6 +405,7 @@ struct xen_dm_op {
         struct xen_dm_op_map_mem_type_to_ioreq_server
                 map_mem_type_to_ioreq_server;
         struct xen_dm_op_remote_shutdown remote_shutdown;
+        struct xen_dm_op_add_to_physmap add_to_physmap;
     } u;
 };
 
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 4346cbe..d40bac6 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -57,6 +57,7 @@ 
 ?	grant_entry_v2			grant_table.h
 ?	gnttab_swap_grant_ref		grant_table.h
 !	dm_op_buf			hvm/dm_op.h
+?	dm_op_add_to_physmap		hvm/dm_op.h
 ?	dm_op_create_ioreq_server	hvm/dm_op.h
 ?	dm_op_destroy_ioreq_server	hvm/dm_op.h
 ?	dm_op_get_ioreq_server_info	hvm/dm_op.h
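
One detail of the dm.c hunk worth spelling out is the preemption handling:
xenmem_add_to_physmap() may stop early and return the number of GMFNs
already handled. The handler then shrinks the range in place, clears
const_op so the updated op is copied back to the caller's buffer, and
returns -ERESTART so the hypercall is re-entered to process the remainder.
Reduced to its essentials (standalone sketch; do_work() stands in for
xenmem_add_to_physmap(), and the ERESTART value is an assumption):

    #include <stdbool.h>
    #include <stdint.h>

    #define ERESTART 85   /* placeholder; Xen defines its own value */

    struct atp_op {
        uint16_t size;
        uint64_t idx, gpfn;
    };

    /* Stand-in for xenmem_add_to_physmap(): returns 0 when the whole
     * range is done, a negative errno on failure, or the number of
     * frames processed before deciding to preempt. */
    int do_work(struct atp_op *op);

    static int handle_add_to_physmap(struct atp_op *op, bool *copy_back)
    {
        int rc = do_work(op);

        if ( rc > 0 )            /* preempted after rc frames */
        {
            op->size -= rc;      /* shrink what remains... */
            op->idx  += rc;      /* ...and advance both cursors */
            op->gpfn += rc;
            *copy_back = true;   /* mirrors const_op = false above */
            rc = -ERESTART;      /* have the hypercall re-entered */
        }
        return rc;
    }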