
[1/3] Add new functions to get/set memory types.

Message ID 1458130871-30028-1-git-send-email-yu.c.zhang@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Yu Zhang March 16, 2016, 12:21 p.m. UTC
For clarity this patch breaks the code to set/get memory types out
of do_hvm_op() into dedicated functions: hvmop_set/get_mem_type().
Also, for clarity, checks for whether a memory type change is allowed
are broken out into a separate function called by hvmop_set_mem_type().

There is no intentional functional change in this patch.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
---
 xen/arch/x86/hvm/hvm.c | 280 +++++++++++++++++++++++++++----------------------
 1 file changed, 155 insertions(+), 125 deletions(-)

Comments

George Dunlap March 22, 2016, 11:31 a.m. UTC | #1
On Wed, Mar 16, 2016 at 12:21 PM, Yu Zhang <yu.c.zhang@linux.intel.com> wrote:
> For clarity this patch breaks the code to set/get memory types out
> of do_hvm_op() into dedicated functions: hvmop_set/get_mem_type().
> Also, for clarity, checks for whether a memory type change is allowed
> are broken out into a separate function called by hvmop_set_mem_type().
>
> There is no intentional functional change in this patch.
>
> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
> ---
>  xen/arch/x86/hvm/hvm.c | 280 +++++++++++++++++++++++++++----------------------
>  1 file changed, 155 insertions(+), 125 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 255a1d6..3ccd33f 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -6557,6 +6557,57 @@ static int do_altp2m_op(
>      return rc;
>  }
>
> +static int hvmop_get_mem_type(
> +    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_mem_type_t) arg)
> +{
> +    struct xen_hvm_get_mem_type a;
> +    struct domain *d;
> +    p2m_type_t t;
> +    int rc;
> +
> +    if ( copy_from_guest(&a, arg, 1) )
> +        return -EFAULT;
> +
> +    d = rcu_lock_domain_by_any_id(a.domid);
> +    if ( d == NULL )
> +        return -ESRCH;
> +
> +    rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_get_mem_type);
> +    if ( rc )
> +        goto out;
> +
> +    rc = -EINVAL;
> +    if ( is_hvm_domain(d) )
> +    {
> +        /*
> +         * Use get_gfn query as we are interested in the current
> +         * type, not in allocating or unsharing. That'll happen
> +         * on access.
> +         */
> +        get_gfn_query_unlocked(d, a.pfn, &t);
> +        if ( p2m_is_mmio(t) )
> +            a.mem_type =  HVMMEM_mmio_dm;
> +        else if ( t == p2m_mmio_write_dm )
> +            a.mem_type = HVMMEM_mmio_write_dm;
> +        else if ( p2m_is_readonly(t) )
> +            a.mem_type =  HVMMEM_ram_ro;
> +        else if ( p2m_is_ram(t) )
> +            a.mem_type =  HVMMEM_ram_rw;
> +        else if ( p2m_is_pod(t) )
> +            a.mem_type =  HVMMEM_ram_rw;
> +        else if ( p2m_is_grant(t) )
> +            a.mem_type =  HVMMEM_ram_rw;
> +        else
> +            a.mem_type =  HVMMEM_mmio_dm;
> +        rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;

If you're going to be changing things rather than just pure code
motion, it would probably be better to make this follow the normal
"set rc / do something / goto out" pattern, rather than inventing a
new one:

rc = -EFAULT
if ( __copy_to_guest())
 goto out;

rc = 0;

I suppose ideally this whole if() block would be rewritten this way as well:

rc = -EINVAL
if ( !is_hvm_domain() )
  goto out;

[stuff currently in the if-block]
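
Spelled out, that would be roughly the following; this is just a sketch,
assuming the same locals (a, d, t, rc), the same helpers and the "out:"
label as in the hunk quoted above, with the if-block body kept as-is:

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    /*
     * Use get_gfn query as we are interested in the current
     * type, not in allocating or unsharing. That'll happen
     * on access.
     */
    get_gfn_query_unlocked(d, a.pfn, &t);
    if ( p2m_is_mmio(t) )
        a.mem_type = HVMMEM_mmio_dm;
    else if ( t == p2m_mmio_write_dm )
        a.mem_type = HVMMEM_mmio_write_dm;
    else if ( p2m_is_readonly(t) )
        a.mem_type = HVMMEM_ram_ro;
    else if ( p2m_is_ram(t) )
        a.mem_type = HVMMEM_ram_rw;
    else if ( p2m_is_pod(t) )
        a.mem_type = HVMMEM_ram_rw;
    else if ( p2m_is_grant(t) )
        a.mem_type = HVMMEM_ram_rw;
    else
        a.mem_type = HVMMEM_mmio_dm;

    rc = -EFAULT;
    if ( __copy_to_guest(arg, &a, 1) )
        goto out;

    rc = 0;

 out:
    rcu_unlock_domain(d);

    return rc;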

But that's probably too much for a patch we want to be mostly code-motion.

FWIW I've taken a close look and the functionality looks identical
before and after this patch.

 -George
Yu Zhang March 23, 2016, 6:30 a.m. UTC | #2
Hi George,

   Thanks a lot for your thorough review. :)

On 3/22/2016 7:31 PM, George Dunlap wrote:
> On Wed, Mar 16, 2016 at 12:21 PM, Yu Zhang <yu.c.zhang@linux.intel.com> wrote:
>> For clarity this patch breaks the code to set/get memory types out
>> of do_hvm_op() into dedicated functions: hvmop_set/get_mem_type().
>> Also, for clarity, checks for whether a memory type change is allowed
>> are broken out into a separate function called by hvmop_set_mem_type().
>>
>> There is no intentional functional change in this patch.
>>
>> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
>> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
>> ---
>>   xen/arch/x86/hvm/hvm.c | 280 +++++++++++++++++++++++++++----------------------
>>   1 file changed, 155 insertions(+), 125 deletions(-)
>>
>> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
>> index 255a1d6..3ccd33f 100644
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -6557,6 +6557,57 @@ static int do_altp2m_op(
>>       return rc;
>>   }
>>
>> +static int hvmop_get_mem_type(
>> +    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_mem_type_t) arg)
>> +{
>> +    struct xen_hvm_get_mem_type a;
>> +    struct domain *d;
>> +    p2m_type_t t;
>> +    int rc;
>> +
>> +    if ( copy_from_guest(&a, arg, 1) )
>> +        return -EFAULT;
>> +
>> +    d = rcu_lock_domain_by_any_id(a.domid);
>> +    if ( d == NULL )
>> +        return -ESRCH;
>> +
>> +    rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_get_mem_type);
>> +    if ( rc )
>> +        goto out;
>> +
>> +    rc = -EINVAL;
>> +    if ( is_hvm_domain(d) )
>> +    {
>> +        /*
>> +         * Use get_gfn query as we are interested in the current
>> +         * type, not in allocating or unsharing. That'll happen
>> +         * on access.
>> +         */
>> +        get_gfn_query_unlocked(d, a.pfn, &t);
>> +        if ( p2m_is_mmio(t) )
>> +            a.mem_type =  HVMMEM_mmio_dm;
>> +        else if ( t == p2m_mmio_write_dm )
>> +            a.mem_type = HVMMEM_mmio_write_dm;
>> +        else if ( p2m_is_readonly(t) )
>> +            a.mem_type =  HVMMEM_ram_ro;
>> +        else if ( p2m_is_ram(t) )
>> +            a.mem_type =  HVMMEM_ram_rw;
>> +        else if ( p2m_is_pod(t) )
>> +            a.mem_type =  HVMMEM_ram_rw;
>> +        else if ( p2m_is_grant(t) )
>> +            a.mem_type =  HVMMEM_ram_rw;
>> +        else
>> +            a.mem_type =  HVMMEM_mmio_dm;
>> +        rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
>
> If you're going to be changing things rather than just pure code
> motion, it would probably be better to make this follow the normal
> "set rc / do something / goto out" pattern, rather than inventing a
> new one:
>
> rc = -EFAULT
> if ( __copy_to_guest())
>   goto out;
>
> rc = 0;
>
> I suppose ideally this whole if() block would be rewritten this way as well:
>
> rc = -EINVAL
> if ( !is_hvm_domain() )
>    goto out;
>
> [stuff currently in the if-block]
>
> But that's probably too much for a patch we want to be mostly code-motion.
>
> FWIW I've taken a close look and the functionality looks identical
> before and after this patch.

Well, this patch is only code motion. :)
The "set rc / do sth. / goto out" pattern sounds reasonable, and I'd like
to take this change in the next version.

>
>   -George
>

B.R.
Yu

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 255a1d6..3ccd33f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -6557,6 +6557,57 @@  static int do_altp2m_op(
     return rc;
 }
 
+static int hvmop_get_mem_type(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_mem_type_t) arg)
+{
+    struct xen_hvm_get_mem_type a;
+    struct domain *d;
+    p2m_type_t t;
+    int rc;
+
+    if ( copy_from_guest(&a, arg, 1) )
+        return -EFAULT;
+
+    d = rcu_lock_domain_by_any_id(a.domid);
+    if ( d == NULL )
+        return -ESRCH;
+
+    rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_get_mem_type);
+    if ( rc )
+        goto out;
+
+    rc = -EINVAL;
+    if ( is_hvm_domain(d) )
+    {
+        /*
+         * Use get_gfn query as we are interested in the current
+         * type, not in allocating or unsharing. That'll happen
+         * on access.
+         */
+        get_gfn_query_unlocked(d, a.pfn, &t);
+        if ( p2m_is_mmio(t) )
+            a.mem_type =  HVMMEM_mmio_dm;
+        else if ( t == p2m_mmio_write_dm )
+            a.mem_type = HVMMEM_mmio_write_dm;
+        else if ( p2m_is_readonly(t) )
+            a.mem_type =  HVMMEM_ram_ro;
+        else if ( p2m_is_ram(t) )
+            a.mem_type =  HVMMEM_ram_rw;
+        else if ( p2m_is_pod(t) )
+            a.mem_type =  HVMMEM_ram_rw;
+        else if ( p2m_is_grant(t) )
+            a.mem_type =  HVMMEM_ram_rw;
+        else
+            a.mem_type =  HVMMEM_mmio_dm;
+        rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+    }
+
+ out:
+    rcu_unlock_domain(d);
+
+    return rc;
+}
+
 /*
  * Note that this value is effectively part of the ABI, even if we don't need
  * to make it a formal part of it: A guest suspended for migration in the
@@ -6565,6 +6616,105 @@  static int do_altp2m_op(
  */
 #define HVMOP_op_mask 0xff
 
+static bool_t hvm_allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
+{
+    if ( p2m_is_ram(old) ||
+         (p2m_is_hole(old) && new == p2m_mmio_dm) ||
+         (old == p2m_mmio_write_dm && new == p2m_ram_rw) )
+        return 1;
+
+    return 0;
+}
+
+static int hvmop_set_mem_type(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_mem_type_t) arg,
+    unsigned long *iter)
+{
+    unsigned long start_iter = *iter;
+    struct xen_hvm_set_mem_type a;
+    struct domain *d;
+    int rc;
+
+    /* Interface types to internal p2m types */
+    static const p2m_type_t memtype[] = {
+        [HVMMEM_ram_rw]  = p2m_ram_rw,
+        [HVMMEM_ram_ro]  = p2m_ram_ro,
+        [HVMMEM_mmio_dm] = p2m_mmio_dm,
+        [HVMMEM_mmio_write_dm] = p2m_mmio_write_dm
+    };
+
+    if ( copy_from_guest(&a, arg, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_remote_domain_by_id(a.domid, &d);
+    if ( rc != 0 )
+        return rc;
+
+    rc = -EINVAL;
+    if ( !is_hvm_domain(d) )
+        goto out;
+
+    rc = xsm_hvm_control(XSM_DM_PRIV, d, HVMOP_set_mem_type);
+    if ( rc )
+        goto out;
+
+    rc = -EINVAL;
+    if ( a.nr < start_iter ||
+         ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+         ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
+        goto out;
+
+    if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
+        goto out;
+
+    while ( a.nr > start_iter )
+    {
+        unsigned long pfn = a.first_pfn + start_iter;
+        p2m_type_t t;
+
+        get_gfn_unshare(d, pfn, &t);
+        if ( p2m_is_paging(t) )
+        {
+            put_gfn(d, pfn);
+            p2m_mem_paging_populate(d, pfn);
+            rc = -EAGAIN;
+            goto out;
+        }
+        if ( p2m_is_shared(t) )
+        {
+            put_gfn(d, pfn);
+            rc = -EAGAIN;
+            goto out;
+        }
+        if ( !hvm_allow_p2m_type_change(t, memtype[a.hvmmem_type]) )
+        {
+            put_gfn(d, pfn);
+            goto out;
+        }
+
+        rc = p2m_change_type_one(d, pfn, t, memtype[a.hvmmem_type]);
+        put_gfn(d, pfn);
+
+        if ( rc )
+            goto out;
+
+        /* Check for continuation if it's not the last interation */
+        if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
+             hypercall_preempt_check() )
+        {
+            rc = -ERESTART;
+            goto out;
+        }
+    }
+    rc = 0;
+
+ out:
+    rcu_unlock_domain(d);
+    *iter = start_iter;
+
+    return rc;
+}
+
 long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     unsigned long start_iter, mask;
@@ -6754,135 +6904,15 @@  long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
     }
 
     case HVMOP_get_mem_type:
-    {
-        struct xen_hvm_get_mem_type a;
-        struct domain *d;
-        p2m_type_t t;
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        d = rcu_lock_domain_by_any_id(a.domid);
-        if ( d == NULL )
-            return -ESRCH;
-
-        rc = xsm_hvm_param(XSM_TARGET, d, op);
-        if ( unlikely(rc) )
-            /* nothing */;
-        else if ( likely(is_hvm_domain(d)) )
-        {
-            /* Use get_gfn query as we are interested in the current 
-             * type, not in allocating or unsharing. That'll happen 
-             * on access. */
-            get_gfn_query_unlocked(d, a.pfn, &t);
-            if ( p2m_is_mmio(t) )
-                a.mem_type =  HVMMEM_mmio_dm;
-            else if ( t == p2m_mmio_write_dm )
-                a.mem_type = HVMMEM_mmio_write_dm;
-            else if ( p2m_is_readonly(t) )
-                a.mem_type =  HVMMEM_ram_ro;
-            else if ( p2m_is_ram(t) )
-                a.mem_type =  HVMMEM_ram_rw;
-            else if ( p2m_is_pod(t) )
-                a.mem_type =  HVMMEM_ram_rw;
-            else if ( p2m_is_grant(t) )
-                a.mem_type =  HVMMEM_ram_rw;
-            else
-                a.mem_type =  HVMMEM_mmio_dm;
-            if ( __copy_to_guest(arg, &a, 1) )
-                rc = -EFAULT;
-        }
-        else
-            rc = -EINVAL;
-
-        rcu_unlock_domain(d);
+        rc = hvmop_get_mem_type(
+            guest_handle_cast(arg, xen_hvm_get_mem_type_t));
         break;
-    }
 
     case HVMOP_set_mem_type:
-    {
-        struct xen_hvm_set_mem_type a;
-        struct domain *d;
-        
-        /* Interface types to internal p2m types */
-        static const p2m_type_t memtype[] = {
-            [HVMMEM_ram_rw]  = p2m_ram_rw,
-            [HVMMEM_ram_ro]  = p2m_ram_ro,
-            [HVMMEM_mmio_dm] = p2m_mmio_dm,
-            [HVMMEM_mmio_write_dm] = p2m_mmio_write_dm
-        };
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto setmemtype_fail;
-
-        rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
-        if ( rc )
-            goto setmemtype_fail;
-
-        rc = -EINVAL;
-        if ( a.nr < start_iter ||
-             ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
-             ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
-            goto setmemtype_fail;
-            
-        if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
-            goto setmemtype_fail;
-
-        while ( a.nr > start_iter )
-        {
-            unsigned long pfn = a.first_pfn + start_iter;
-            p2m_type_t t;
-
-            get_gfn_unshare(d, pfn, &t);
-            if ( p2m_is_paging(t) )
-            {
-                put_gfn(d, pfn);
-                p2m_mem_paging_populate(d, pfn);
-                rc = -EAGAIN;
-                goto setmemtype_fail;
-            }
-            if ( p2m_is_shared(t) )
-            {
-                put_gfn(d, pfn);
-                rc = -EAGAIN;
-                goto setmemtype_fail;
-            }
-            if ( !p2m_is_ram(t) &&
-                 (!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) &&
-                 (t != p2m_mmio_write_dm || a.hvmmem_type != HVMMEM_ram_rw) )
-            {
-                put_gfn(d, pfn);
-                goto setmemtype_fail;
-            }
-
-            rc = p2m_change_type_one(d, pfn, t, memtype[a.hvmmem_type]);
-            put_gfn(d, pfn);
-            if ( rc )
-                goto setmemtype_fail;
-
-            /* Check for continuation if it's not the last interation */
-            if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
-                 hypercall_preempt_check() )
-            {
-                rc = -ERESTART;
-                goto setmemtype_fail;
-            }
-        }
-
-        rc = 0;
-
-    setmemtype_fail:
-        rcu_unlock_domain(d);
+        rc = hvmop_set_mem_type(
+            guest_handle_cast(arg, xen_hvm_set_mem_type_t),
+            &start_iter);
         break;
-    }
 
     case HVMOP_pagetable_dying:
     {