diff mbox

[v6,1/2] xen/hvm: introduce a flags field in the CPU save record

Message ID 1452615153-67387-2-git-send-email-roger.pau@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Roger Pau Monné Jan. 12, 2016, 4:12 p.m. UTC
Introduce a new flags field and use bit 0 to signal if the FPU has been
initialised or not. Previously Xen always wrongly assumed the FPU was
initialised on restore.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changes since v5:
 - Adapt the code due to the changes introduced by XSA-165.

Changes since v4:
 - Zero the fpu save area if the FPU is not initialised. This avoids
   leaking hypervisor data.
 - Due to the previous patch in this series, slightly rework the way to load
   the FPU state in a xsave capable host.

Changes since v3:
 - Don't add a comment in the compat structure regarding the fpu_initialised
   field.
 - Rename fpu_initialised to flags and use it as a bit field. Bit 0 will be
   used to signal whether the fpu is initialised.
 - Only save the fpu context if it's initialised.
 - Only restore the fpu context from the save record if the fpu is
   initialised.
 - Check that unused bits in the flags field are 0.

Changes since v1:
 - Don't add yet another compat structure, new fields should always be added
   to the end of the existing structure and offsetof should be used to
   compare sizes.
 - Leave the previous compat structure as-is, since the field was not added
   to the end we cannot remove it and use offsetof in this case.
 - Set xstate_bv based on fpu_initialised value instead of unconditionally
   setting it to XSTATE_FP_SSE.
---
 xen/arch/x86/hvm/hvm.c                 | 42 ++++++++++++++++++++++------------
 xen/include/public/arch-x86/hvm/save.h | 27 ++++++++++++++++------
 2 files changed, 48 insertions(+), 21 deletions(-)

Comments

Jan Beulich Jan. 12, 2016, 4:31 p.m. UTC | #1
>>> On 12.01.16 at 17:12, <roger.pau@citrix.com> wrote:
> @@ -2087,19 +2100,21 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
>      seg.attr.bytes = ctxt.ldtr_arbytes;
>      hvm_set_segment_register(v, x86_seg_ldtr, &seg);
>  
> -    /* In case xsave-absent save file is restored on a xsave-capable host */
> -    if ( cpu_has_xsave && !xsave_enabled(v) )
> +    v->fpu_initialised = !!(ctxt.flags & XEN_X86_FPU_INITIALISED);
> +    if ( v->fpu_initialised )
>      {
> -        struct xsave_struct *xsave_area = v->arch.xsave_area;
> +        memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
> +        /* In case xsave-absent save file is restored on a xsave-capable host */
> +        if ( cpu_has_xsave && !xsave_enabled(v) )
> +        {
> +            struct xsave_struct *xsave_area = v->arch.xsave_area;
>  
> -        memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
> -        xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
> -        if ( cpu_has_xsaves || cpu_has_xsavec )
> -            xsave_area->xsave_hdr.xcomp_bv = XSTATE_FP_SSE |
> -                                             XSTATE_COMPACTION_ENABLED;
> +            xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
> +            if ( cpu_has_xsaves || cpu_has_xsavec )
> +                xsave_area->xsave_hdr.xcomp_bv = XSTATE_FP_SSE |
> +                                                 XSTATE_COMPACTION_ENABLED;
> +        }
>      }
> -    else
> -        memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>  

I would have expected this to simply be re-indentation, yet
you changed from if/else to just if with the else code done
ahead of it. If this really is intended, the commit message should
explain it.

Jan
Roger Pau Monné Jan. 12, 2016, 5:49 p.m. UTC | #2
El 12/01/16 a les 17.31, Jan Beulich ha escrit:
>>>> On 12.01.16 at 17:12, <roger.pau@citrix.com> wrote:
>> @@ -2087,19 +2100,21 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
>>      seg.attr.bytes = ctxt.ldtr_arbytes;
>>      hvm_set_segment_register(v, x86_seg_ldtr, &seg);
>>  
>> -    /* In case xsave-absent save file is restored on a xsave-capable host */
>> -    if ( cpu_has_xsave && !xsave_enabled(v) )
>> +    v->fpu_initialised = !!(ctxt.flags & XEN_X86_FPU_INITIALISED);
>> +    if ( v->fpu_initialised )
>>      {
>> -        struct xsave_struct *xsave_area = v->arch.xsave_area;
>> +        memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>> +        /* In case xsave-absent save file is restored on a xsave-capable host */
>> +        if ( cpu_has_xsave && !xsave_enabled(v) )
>> +        {
>> +            struct xsave_struct *xsave_area = v->arch.xsave_area;
>>  
>> -        memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>> -        xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
>> -        if ( cpu_has_xsaves || cpu_has_xsavec )
>> -            xsave_area->xsave_hdr.xcomp_bv = XSTATE_FP_SSE |
>> -                                             XSTATE_COMPACTION_ENABLED;
>> +            xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
>> +            if ( cpu_has_xsaves || cpu_has_xsavec )
>> +                xsave_area->xsave_hdr.xcomp_bv = XSTATE_FP_SSE |
>> +                                                 XSTATE_COMPACTION_ENABLED;
>> +        }
>>      }
>> -    else
>> -        memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>>  
> 
> I would have expected this to simply be re-indentation, yet
> you changed from if/else to just if with the else code done
> ahead of it. If this really is intended, the commit message should
> explain it.

Right, sorry. AFAICT v->arch.fpu_ctxt points to the xsave_area (as set
by vcpu_init_fpu), so I thought it was simpler to just do one memcpy for
both cases, since v->arch.fpu_ctxt always points to the right area in
either case (and I was already modifying the code in question).

I can see that this might be seen as an unrelated change, so if you want
I can split it into a separate patch, or add the following to the commit
message:

"While modifying the FPU restore part of hvm_load_cpu_ctxt remove the
memcpy branching, since v->arch.fpu_ctxt will always point to the right
area for hosts with XSAVE or without it."

Thanks, Roger.
Jan Beulich Jan. 13, 2016, 10:52 a.m. UTC | #3
>>> On 12.01.16 at 18:49, <roger.pau@citrix.com> wrote:
> El 12/01/16 a les 17.31, Jan Beulich ha escrit:
>>>>> On 12.01.16 at 17:12, <roger.pau@citrix.com> wrote:
>>> @@ -2087,19 +2100,21 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
>>>      seg.attr.bytes = ctxt.ldtr_arbytes;
>>>      hvm_set_segment_register(v, x86_seg_ldtr, &seg);
>>>  
>>> -    /* In case xsave-absent save file is restored on a xsave-capable host */
>>> -    if ( cpu_has_xsave && !xsave_enabled(v) )
>>> +    v->fpu_initialised = !!(ctxt.flags & XEN_X86_FPU_INITIALISED);
>>> +    if ( v->fpu_initialised )
>>>      {
>>> -        struct xsave_struct *xsave_area = v->arch.xsave_area;
>>> +        memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>>> +        /* In case xsave-absent save file is restored on a xsave-capable host */
>>> +        if ( cpu_has_xsave && !xsave_enabled(v) )
>>> +        {
>>> +            struct xsave_struct *xsave_area = v->arch.xsave_area;
>>>  
>>> -        memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>>> -        xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
>>> -        if ( cpu_has_xsaves || cpu_has_xsavec )
>>> -            xsave_area->xsave_hdr.xcomp_bv = XSTATE_FP_SSE |
>>> -                                             XSTATE_COMPACTION_ENABLED;
>>> +            xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
>>> +            if ( cpu_has_xsaves || cpu_has_xsavec )
>>> +                xsave_area->xsave_hdr.xcomp_bv = XSTATE_FP_SSE |
>>> +                                                 XSTATE_COMPACTION_ENABLED;
>>> +        }
>>>      }
>>> -    else
>>> -        memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
>>>  
>> 
>> I would have expected this to simply be re-indentation, yet
>> you changed from if/else to just if with the else code done
>> ahead of it. If this really is intended, the commit message should
>> explain it.
> 
> Right, sorry. AFAICT v->arch.fpu_ctxt points to the xsave_area (as set
> by vcpu_init_fpu), so I thought it was simpler to just do one memcpy for
> both cases, since v->arch.fpu_ctxt always points to the right area in
> either case (and I was already modifying the code in question).
> 
> I can see that this might be seen as an unrelated change, so if you want
> I can split it into a separate patch, or add the following to the commit
> message:
> 
> "While modifying the FPU restore part of hvm_load_cpu_ctxt remove the
> memcpy branching, since v->arch.fpu_ctxt will always point to the right
> area for hosts with XSAVE or without it."

I'm fine with that commit message extension, and will be happy to
add it while committing unless a need arises for you to re-submit.

Jan
diff mbox

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 787b7de..79ec90f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1797,9 +1797,15 @@  static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         ctxt.ldtr_arbytes = seg.attr.bytes;
 
         if ( v->fpu_initialised )
+        {
             memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
-        else 
-            memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
+            ctxt.flags = XEN_X86_FPU_INITIALISED;
+        }
+        else
+        {
+             memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
+             ctxt.flags = 0;
+        }
 
         ctxt.rax = v->arch.user_regs.eax;
         ctxt.rbx = v->arch.user_regs.ebx;
@@ -1981,7 +1987,7 @@  static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         return -EINVAL;
     }
 
-    if ( hvm_load_entry(CPU, h, &ctxt) != 0 ) 
+    if ( hvm_load_entry_zeroextend(CPU, h, &ctxt) != 0 )
         return -EINVAL;
 
     /* Sanity check some control registers. */
@@ -2009,6 +2015,13 @@  static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         return -EINVAL;
     }
 
+    if ( (ctxt.flags & ~XEN_X86_FPU_INITIALISED) != 0 )
+    {
+        gprintk(XENLOG_ERR, "bad flags value in CPU context: %#x\n",
+                ctxt.flags);
+        return -EINVAL;
+    }
+
     /* Older Xen versions used to save the segment arbytes directly 
      * from the VMCS on Intel hosts.  Detect this and rearrange them
      * into the struct segment_register format. */
@@ -2087,19 +2100,21 @@  static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     seg.attr.bytes = ctxt.ldtr_arbytes;
     hvm_set_segment_register(v, x86_seg_ldtr, &seg);
 
-    /* In case xsave-absent save file is restored on a xsave-capable host */
-    if ( cpu_has_xsave && !xsave_enabled(v) )
+    v->fpu_initialised = !!(ctxt.flags & XEN_X86_FPU_INITIALISED);
+    if ( v->fpu_initialised )
     {
-        struct xsave_struct *xsave_area = v->arch.xsave_area;
+        memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
+        /* In case xsave-absent save file is restored on a xsave-capable host */
+        if ( cpu_has_xsave && !xsave_enabled(v) )
+        {
+            struct xsave_struct *xsave_area = v->arch.xsave_area;
 
-        memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
-        xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
-        if ( cpu_has_xsaves || cpu_has_xsavec )
-            xsave_area->xsave_hdr.xcomp_bv = XSTATE_FP_SSE |
-                                             XSTATE_COMPACTION_ENABLED;
+            xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
+            if ( cpu_has_xsaves || cpu_has_xsavec )
+                xsave_area->xsave_hdr.xcomp_bv = XSTATE_FP_SSE |
+                                                 XSTATE_COMPACTION_ENABLED;
+        }
     }
-    else
-        memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
 
     v->arch.user_regs.eax = ctxt.rax;
     v->arch.user_regs.ebx = ctxt.rbx;
@@ -2127,7 +2142,6 @@  static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     v->arch.debugreg[7] = ctxt.dr7;
 
     v->arch.vgc_flags = VGCF_online;
-    v->fpu_initialised = 1;
 
     /* Auxiliary processors should be woken immediately. */
     v->is_initialised = 1;
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 29d513c..b6b1bf8 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -47,7 +47,9 @@  DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
 /*
  * Processor
  *
- * Compat: Pre-3.4 didn't have msr_tsc_aux
+ * Compat:
+ *     - Pre-3.4 didn't have msr_tsc_aux
+ *     - Pre-4.7 didn't have fpu_initialised
  */
 
 struct hvm_hw_cpu {
@@ -157,6 +159,10 @@  struct hvm_hw_cpu {
     };
     /* error code for pending event */
     uint32_t error_code;
+
+#define _XEN_X86_FPU_INITIALISED        0
+#define XEN_X86_FPU_INITIALISED         (1U<<_XEN_X86_FPU_INITIALISED)
+    uint32_t flags;
 };
 
 struct hvm_hw_cpu_compat {
@@ -275,12 +281,19 @@  static inline int _hvm_hw_fix_cpu(void *h, uint32_t size) {
         struct hvm_hw_cpu_compat cmp;
     } *ucpu = (union hvm_hw_cpu_union *)h;
 
-    /* If we copy from the end backwards, we should
-     * be able to do the modification in-place */
-    ucpu->nat.error_code = ucpu->cmp.error_code;
-    ucpu->nat.pending_event = ucpu->cmp.pending_event;
-    ucpu->nat.tsc = ucpu->cmp.tsc;
-    ucpu->nat.msr_tsc_aux = 0;
+    if ( size == sizeof(struct hvm_hw_cpu_compat) )
+    {
+        /*
+         * If we copy from the end backwards, we should
+         * be able to do the modification in-place.
+         */
+        ucpu->nat.error_code = ucpu->cmp.error_code;
+        ucpu->nat.pending_event = ucpu->cmp.pending_event;
+        ucpu->nat.tsc = ucpu->cmp.tsc;
+        ucpu->nat.msr_tsc_aux = 0;
+    }
+    /* Mimic the old behaviour by unconditionally setting fpu_initialised. */
+    ucpu->nat.flags = XEN_X86_FPU_INITIALISED;
 
     return 0;
 }