
[v7,2/7] x86/hvm: introduce hvm_copy_context_and_params

Message ID 4533b6750698ec7f3c4721261c6584a2e3285cc5.1580746020.git.tamas.lengyel@intel.com (mailing list archive)
State Superseded
Series VM forking

Commit Message

Tamas K Lengyel Feb. 3, 2020, 4:12 p.m. UTC
Currently the HVM parameters are only accessible via the HVMOP hypercalls. This
patch introduces a new function that can copy both the HVM context and
parameters directly into a target domain.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
---
v7: get rid of gotos during code movement as requested by Jan
    use different order of operation in hvm_copy_context_and_params
---
 xen/arch/x86/hvm/hvm.c        | 253 ++++++++++++++++++++--------------
 xen/include/asm-x86/hvm/hvm.h |   2 +
 2 files changed, 152 insertions(+), 103 deletions(-)
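
For orientation, a minimal sketch of how a caller might use the new helper. The
wrapper function and the child/parent naming below are hypothetical and not part
of this patch; only hvm_copy_context_and_params() comes from it (argument order
as in the hvm.c definition, destination first):

/* Hypothetical caller sketch -- only hvm_copy_context_and_params() is real. */
static int fork_hvm_state(struct domain *cd /* child */, struct domain *d /* parent */)
{
    int rc;

    /* Copy the parent's HVM context and all non-zero HVM params into the child. */
    rc = hvm_copy_context_and_params(cd, d);
    if ( rc )
        return rc;

    /* ...further fork-specific setup would follow here... */
    return 0;
}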

Comments

Jan Beulich Feb. 5, 2020, 11:39 a.m. UTC | #1
On 03.02.2020 17:12, Tamas K Lengyel wrote:
> @@ -4414,6 +4424,40 @@ static int hvm_allow_get_param(struct domain *d,
>      return rc;
>  }
>  
> +static int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value)
> +{
> +    int rc;
> +
> +    rc = hvm_allow_get_param(d, index);
> +    if ( rc )
> +        return rc;
> +
> +    switch ( index )
> +    {
> +    case HVM_PARAM_ACPI_S_STATE:
> +        *value = d->arch.hvm.is_s3_suspended ? 3 : 0;
> +        break;
> +
> +    case HVM_PARAM_VM86_TSS:
> +        *value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
> +        break;
> +
> +    case HVM_PARAM_VM86_TSS_SIZED:
> +        *value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
> +                   ~VM86_TSS_UPDATED;

Indentation got broken here.

> +        break;
> +
> +    case HVM_PARAM_X87_FIP_WIDTH:
> +        *value = d->arch.x87_fip_width;
> +        break;
> +    default:

A blank line would have wanted introducing above here.

> @@ -5301,6 +5317,37 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
>      alternative_vcall(hvm_funcs.set_segment_register, v, seg, reg);
>  }
>  
> +int hvm_copy_context_and_params(struct domain *dst, struct domain *src)
> +{
> +    int rc;
> +    unsigned int i;
> +    struct hvm_domain_context c = { };
> +
> +    c.size = hvm_save_size(src);

As mentioned in an earlier version's review, this could be put in
the variable's initializer.
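
For illustration, the initializer form being suggested would look roughly like:

    struct hvm_domain_context c = { .size = hvm_save_size(src) };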

I'll take care of all of the above and add
Reviewed-by: Jan Beulich <jbeulich@suse.com>
while committing.

Jan
Tamas K Lengyel Feb. 5, 2020, 2:11 p.m. UTC | #2
On Wed, Feb 5, 2020 at 4:40 AM Jan Beulich <jbeulich@suse.com> wrote:
> [...]
> I'll take care of all of the above and add
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> while committing.

Thanks, it's appreciated.
Tamas
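
For orientation before the full diff, the call structure after this patch is
roughly as follows (a sketch of the code below, not literal code):

/*
 * HVMOP_set_param hypercall -> hvmop_set_param() -> hvm_set_param()
 * HVMOP_get_param hypercall -> hvmop_get_param() -> hvm_get_param()
 *
 * hvm_copy_context_and_params(dst, src):
 *     hvm_save_size(src) + hvm_save(src)          -- snapshot src's HVM context
 *     hvm_get_param(src, i) / hvm_set_param(dst, i) for each non-zero param
 *     hvm_load(dst)                               -- replay the context into dst
 */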

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2fee569a5f..010866395a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4081,16 +4081,17 @@  static int hvmop_set_evtchn_upcall_vector(
 }
 
 static int hvm_allow_set_param(struct domain *d,
-                               const struct xen_hvm_param *a)
+                               uint32_t index,
+                               uint64_t new_value)
 {
-    uint64_t value = d->arch.hvm.params[a->index];
+    uint64_t value = d->arch.hvm.params[index];
     int rc;
 
     rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_set_param);
     if ( rc )
         return rc;
 
-    switch ( a->index )
+    switch ( index )
     {
     /* The following parameters can be set by the guest. */
     case HVM_PARAM_CALLBACK_IRQ:
@@ -4123,7 +4124,7 @@  static int hvm_allow_set_param(struct domain *d,
     if ( rc )
         return rc;
 
-    switch ( a->index )
+    switch ( index )
     {
     /* The following parameters should only be changed once. */
     case HVM_PARAM_VIRIDIAN:
@@ -4133,7 +4134,7 @@  static int hvm_allow_set_param(struct domain *d,
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
     case HVM_PARAM_ALTP2M:
     case HVM_PARAM_MCA_CAP:
-        if ( value != 0 && a->value != value )
+        if ( value != 0 && new_value != value )
             rc = -EEXIST;
         break;
     default:
@@ -4143,49 +4144,32 @@  static int hvm_allow_set_param(struct domain *d,
     return rc;
 }
 
-static int hvmop_set_param(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
+static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
 {
     struct domain *curr_d = current->domain;
-    struct xen_hvm_param a;
-    struct domain *d;
     struct vcpu *v;
     int rc;
 
-    if ( copy_from_guest(&a, arg, 1) )
-        return -EFAULT;
-
-    if ( a.index >= HVM_NR_PARAMS )
+    if ( index >= HVM_NR_PARAMS )
         return -EINVAL;
 
-    /* Make sure the above bound check is not bypassed during speculation. */
-    block_speculation();
-
-    d = rcu_lock_domain_by_any_id(a.domid);
-    if ( d == NULL )
-        return -ESRCH;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = hvm_allow_set_param(d, &a);
+    rc = hvm_allow_set_param(d, index, value);
     if ( rc )
-        goto out;
+        return rc;
 
-    switch ( a.index )
+    switch ( index )
     {
     case HVM_PARAM_CALLBACK_IRQ:
-        hvm_set_callback_via(d, a.value);
+        hvm_set_callback_via(d, value);
         hvm_latch_shinfo_size(d);
         break;
     case HVM_PARAM_TIMER_MODE:
-        if ( a.value > HVMPTM_one_missed_tick_pending )
+        if ( value > HVMPTM_one_missed_tick_pending )
             rc = -EINVAL;
         break;
     case HVM_PARAM_VIRIDIAN:
-        if ( (a.value & ~HVMPV_feature_mask) ||
-             !(a.value & HVMPV_base_freq) )
+        if ( (value & ~HVMPV_feature_mask) ||
+             !(value & HVMPV_base_freq) )
             rc = -EINVAL;
         break;
     case HVM_PARAM_IDENT_PT:
@@ -4195,7 +4179,7 @@  static int hvmop_set_param(
          */
         if ( !paging_mode_hap(d) || !cpu_has_vmx )
         {
-            d->arch.hvm.params[a.index] = a.value;
+            d->arch.hvm.params[index] = value;
             break;
         }
 
@@ -4210,7 +4194,7 @@  static int hvmop_set_param(
 
         rc = 0;
         domain_pause(d);
-        d->arch.hvm.params[a.index] = a.value;
+        d->arch.hvm.params[index] = value;
         for_each_vcpu ( d, v )
             paging_update_cr3(v, false);
         domain_unpause(d);
@@ -4219,23 +4203,23 @@  static int hvmop_set_param(
         break;
     case HVM_PARAM_DM_DOMAIN:
         /* The only value this should ever be set to is DOMID_SELF */
-        if ( a.value != DOMID_SELF )
+        if ( value != DOMID_SELF )
             rc = -EINVAL;
 
-        a.value = curr_d->domain_id;
+        value = curr_d->domain_id;
         break;
     case HVM_PARAM_ACPI_S_STATE:
         rc = 0;
-        if ( a.value == 3 )
+        if ( value == 3 )
             hvm_s3_suspend(d);
-        else if ( a.value == 0 )
+        else if ( value == 0 )
             hvm_s3_resume(d);
         else
             rc = -EINVAL;
 
         break;
     case HVM_PARAM_ACPI_IOPORTS_LOCATION:
-        rc = pmtimer_change_ioport(d, a.value);
+        rc = pmtimer_change_ioport(d, value);
         break;
     case HVM_PARAM_MEMORY_EVENT_CR0:
     case HVM_PARAM_MEMORY_EVENT_CR3:
@@ -4250,24 +4234,24 @@  static int hvmop_set_param(
         rc = xsm_hvm_param_nested(XSM_PRIV, d);
         if ( rc )
             break;
-        if ( a.value > 1 )
+        if ( value > 1 )
             rc = -EINVAL;
         /*
          * Remove the check below once we have
          * shadow-on-shadow.
          */
-        if ( !paging_mode_hap(d) && a.value )
+        if ( !paging_mode_hap(d) && value )
             rc = -EINVAL;
-        if ( a.value &&
+        if ( value &&
              d->arch.hvm.params[HVM_PARAM_ALTP2M] )
             rc = -EINVAL;
         /* Set up NHVM state for any vcpus that are already up. */
-        if ( a.value &&
+        if ( value &&
              !d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
             for_each_vcpu(d, v)
                 if ( rc == 0 )
                     rc = nestedhvm_vcpu_initialise(v);
-        if ( !a.value || rc )
+        if ( !value || rc )
             for_each_vcpu(d, v)
                 nestedhvm_vcpu_destroy(v);
         break;
@@ -4275,30 +4259,30 @@  static int hvmop_set_param(
         rc = xsm_hvm_param_altp2mhvm(XSM_PRIV, d);
         if ( rc )
             break;
-        if ( a.value > XEN_ALTP2M_limited )
+        if ( value > XEN_ALTP2M_limited )
             rc = -EINVAL;
-        if ( a.value &&
+        if ( value &&
              d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
             rc = -EINVAL;
         break;
     case HVM_PARAM_TRIPLE_FAULT_REASON:
-        if ( a.value > SHUTDOWN_MAX )
+        if ( value > SHUTDOWN_MAX )
             rc = -EINVAL;
         break;
     case HVM_PARAM_IOREQ_SERVER_PFN:
-        d->arch.hvm.ioreq_gfn.base = a.value;
+        d->arch.hvm.ioreq_gfn.base = value;
         break;
     case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
     {
         unsigned int i;
 
-        if ( a.value == 0 ||
-             a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
+        if ( value == 0 ||
+             value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
         {
             rc = -EINVAL;
             break;
         }
-        for ( i = 0; i < a.value; i++ )
+        for ( i = 0; i < value; i++ )
             set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
 
         break;
@@ -4310,35 +4294,35 @@  static int hvmop_set_param(
                      sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
         BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN >
                      sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
-        if ( a.value )
-            set_bit(a.index, &d->arch.hvm.ioreq_gfn.legacy_mask);
+        if ( value )
+            set_bit(index, &d->arch.hvm.ioreq_gfn.legacy_mask);
         break;
 
     case HVM_PARAM_X87_FIP_WIDTH:
-        if ( a.value != 0 && a.value != 4 && a.value != 8 )
+        if ( value != 0 && value != 4 && value != 8 )
         {
             rc = -EINVAL;
             break;
         }
-        d->arch.x87_fip_width = a.value;
+        d->arch.x87_fip_width = value;
         break;
 
     case HVM_PARAM_VM86_TSS:
         /* Hardware would silently truncate high bits. */
-        if ( a.value != (uint32_t)a.value )
+        if ( value != (uint32_t)value )
         {
             if ( d == curr_d )
                 domain_crash(d);
             rc = -EINVAL;
         }
         /* Old hvmloader binaries hardcode the size to 128 bytes. */
-        if ( a.value )
-            a.value |= (128ULL << 32) | VM86_TSS_UPDATED;
-        a.index = HVM_PARAM_VM86_TSS_SIZED;
+        if ( value )
+            value |= (128ULL << 32) | VM86_TSS_UPDATED;
+        index = HVM_PARAM_VM86_TSS_SIZED;
         break;
 
     case HVM_PARAM_VM86_TSS_SIZED:
-        if ( (a.value >> 32) < sizeof(struct tss32) )
+        if ( (value >> 32) < sizeof(struct tss32) )
         {
             if ( d == curr_d )
                 domain_crash(d);
@@ -4349,34 +4333,60 @@  static int hvmop_set_param(
          * 256 bits interrupt redirection bitmap + 64k bits I/O bitmap
          * plus one padding byte).
          */
-        if ( (a.value >> 32) > sizeof(struct tss32) +
+        if ( (value >> 32) > sizeof(struct tss32) +
                                (0x100 / 8) + (0x10000 / 8) + 1 )
-            a.value = (uint32_t)a.value |
+            value = (uint32_t)value |
                       ((sizeof(struct tss32) + (0x100 / 8) +
                                                (0x10000 / 8) + 1) << 32);
-        a.value |= VM86_TSS_UPDATED;
+        value |= VM86_TSS_UPDATED;
         break;
 
     case HVM_PARAM_MCA_CAP:
-        rc = vmce_enable_mca_cap(d, a.value);
+        rc = vmce_enable_mca_cap(d, value);
         break;
     }
 
-    if ( rc != 0 )
-        goto out;
+    if ( !rc )
+    {
+        d->arch.hvm.params[index] = value;
 
-    d->arch.hvm.params[a.index] = a.value;
+        HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64,
+                    index, value);
+    }
 
-    HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64,
-                a.index, a.value);
+    return rc;
+}
+
+int hvmop_set_param(
+    XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
+{
+    struct xen_hvm_param a;
+    struct domain *d;
+    int rc;
+
+    if ( copy_from_guest(&a, arg, 1) )
+        return -EFAULT;
+
+    if ( a.index >= HVM_NR_PARAMS )
+        return -EINVAL;
+
+    /* Make sure the above bound check is not bypassed during speculation. */
+    block_speculation();
+
+    d = rcu_lock_domain_by_any_id(a.domid);
+    if ( d == NULL )
+        return -ESRCH;
+
+    rc = -EINVAL;
+    if ( is_hvm_domain(d) )
+        rc = hvm_set_param(d, a.index, a.value);
 
- out:
     rcu_unlock_domain(d);
     return rc;
 }
 
 static int hvm_allow_get_param(struct domain *d,
-                               const struct xen_hvm_param *a)
+                               uint32_t index)
 {
     int rc;
 
@@ -4384,7 +4394,7 @@  static int hvm_allow_get_param(struct domain *d,
     if ( rc )
         return rc;
 
-    switch ( a->index )
+    switch ( index )
     {
     /* The following parameters can be read by the guest. */
     case HVM_PARAM_CALLBACK_IRQ:
@@ -4414,6 +4424,40 @@  static int hvm_allow_get_param(struct domain *d,
     return rc;
 }
 
+static int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value)
+{
+    int rc;
+
+    rc = hvm_allow_get_param(d, index);
+    if ( rc )
+        return rc;
+
+    switch ( index )
+    {
+    case HVM_PARAM_ACPI_S_STATE:
+        *value = d->arch.hvm.is_s3_suspended ? 3 : 0;
+        break;
+
+    case HVM_PARAM_VM86_TSS:
+        *value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
+        break;
+
+    case HVM_PARAM_VM86_TSS_SIZED:
+        *value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
+                   ~VM86_TSS_UPDATED;
+        break;
+
+    case HVM_PARAM_X87_FIP_WIDTH:
+        *value = d->arch.x87_fip_width;
+        break;
+    default:
+        *value = d->arch.hvm.params[index];
+        break;
+    }
+
+    return 0;
+};
+
 static int hvmop_get_param(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
 {
@@ -4435,42 +4479,14 @@  static int hvmop_get_param(
         return -ESRCH;
 
     rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = hvm_allow_get_param(d, &a);
-    if ( rc )
-        goto out;
-
-    switch ( a.index )
+    if ( is_hvm_domain(d) && !(rc = hvm_get_param(d, a.index, &a.value)) )
     {
-    case HVM_PARAM_ACPI_S_STATE:
-        a.value = d->arch.hvm.is_s3_suspended ? 3 : 0;
-        break;
-
-    case HVM_PARAM_VM86_TSS:
-        a.value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
-        break;
-
-    case HVM_PARAM_VM86_TSS_SIZED:
-        a.value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
-                  ~VM86_TSS_UPDATED;
-        break;
+        rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
 
-    case HVM_PARAM_X87_FIP_WIDTH:
-        a.value = d->arch.x87_fip_width;
-        break;
-    default:
-        a.value = d->arch.hvm.params[a.index];
-        break;
+        HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64,
+                    a.index, a.value);
     }
 
-    rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
-
-    HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64,
-                a.index, a.value);
-
- out:
     rcu_unlock_domain(d);
     return rc;
 }
@@ -5301,6 +5317,37 @@  void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
     alternative_vcall(hvm_funcs.set_segment_register, v, seg, reg);
 }
 
+int hvm_copy_context_and_params(struct domain *dst, struct domain *src)
+{
+    int rc;
+    unsigned int i;
+    struct hvm_domain_context c = { };
+
+    c.size = hvm_save_size(src);
+    if ( (c.data = vmalloc(c.size)) == NULL )
+        return -ENOMEM;
+
+    if ( (rc = hvm_save(src, &c)) )
+        return rc;
+
+    for ( i = 0; i < HVM_NR_PARAMS; i++ )
+    {
+        uint64_t value = 0;
+
+        if ( hvm_get_param(src, i, &value) || !value )
+            continue;
+
+        if ( (rc = hvm_set_param(dst, i, value)) )
+            return rc;
+    }
+
+    c.cur = 0;
+    rc = hvm_load(dst, &c);
+    vfree(c.data);
+
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 9eab1d7493..24da824cbf 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -337,6 +337,8 @@  unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);
 bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
                         void *ctxt);
 
+int hvm_copy_context_and_params(struct domain *src, struct domain *dst);
+
 #ifdef CONFIG_HVM
 
 #define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)