
[RFC,8/9] x86/SVM: Add interrupt management code via AVIC

Message ID 1474264368-4104-9-git-send-email-suravee.suthikulpanit@amd.com (mailing list archive)
State New, archived

Commit Message

Suravee Suthikulpanit Sept. 19, 2016, 5:52 a.m. UTC
Enabling AVIC implicitly disables the V_IRQ, V_INTR_PRIO, V_IGN_TPR,
and V_INTR_VECTOR fields in the VMCB Control Word. Therefore, this patch
introduces new interrupt injection code via AVIC backing page.

Also, the AVIC hardware automatically synchronizes TPR and CR8/vTPR, when
values are updated. Therefore, xen does not need to handle this when enable
AVIC.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 xen/arch/x86/hvm/svm/avic.c        | 31 +++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/intr.c        |  4 ++++
 xen/arch/x86/hvm/svm/svm.c         | 12 ++++++++++--
 xen/arch/x86/hvm/svm/vmcb.c        |  6 +++++-
 xen/include/asm-x86/hvm/svm/avic.h |  1 +
 5 files changed, 51 insertions(+), 3 deletions(-)
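
In short, the new delivery path sets the vector in the vAPIC backing page's IRR and, when the target vCPU is running in guest mode on another physical CPU, writes that CPU's APIC ID to the AVIC doorbell MSR so the hardware re-scans the backing page; otherwise it falls back to a plain vcpu_kick(). The annotated sketch below condenses svm_avic_deliver_posted_intr() from the patch; helpers such as svm_avic_vcpu_enabled() and the AVIC_DOORBELL MSR index come from earlier patches in the series, so treat it as illustrative rather than a drop-in replacement.

    /* Illustrative sketch, condensed from the patch below. */
    void avic_deliver_sketch(struct vcpu *v, u8 vec)
    {
        struct vlapic *vlapic = vcpu_vlapic(v);

        if ( !svm_avic_vcpu_enabled(v) )
        {
            /* Non-AVIC fallback: set IRR in the vlapic page and kick. */
            if ( !vlapic_test_and_set_vector(vec, &vlapic->regs->data[APIC_IRR]) )
                vcpu_kick(v);
            return;
        }

        /* As in the patch: nothing to do if the saved guest RFLAGS has IF clear. */
        if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
            return;

        /* Mark the vector pending in the AVIC backing page's IRR. */
        if ( vlapic_test_and_set_vector(vec, &vlapic->regs->data[APIC_IRR]) )
            return;                     /* already pending */

        if ( v->is_running && v != current )
            /* Target is in guest mode elsewhere: ring its doorbell so the
             * hardware re-evaluates pending interrupts in the backing page. */
            wrmsrl(AVIC_DOORBELL, cpu_data[v->processor].apicid);
        else
            /* Not currently running (or it is us): a kick suffices, since
             * the IRR is evaluated on the next VM entry. */
            vcpu_kick(v);
    }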

Comments

Konrad Rzeszutek Wilk Oct. 14, 2016, 3:44 p.m. UTC | #1
On Mon, Sep 19, 2016 at 12:52:47AM -0500, Suravee Suthikulpanit wrote:
> Enabling AVIC implicitly disables the V_IRQ, V_INTR_PRIO, V_IGN_TPR,
> and V_INTR_VECTOR fields in the VMCB Control Word. Therefore, this patch
> introduces new interrupt injection code via AVIC backing page.
> 
> Also, the AVIC hardware automatically synchronizes TPR and CR8/vTPR, when
> values are updated. Therefore, xen does not need to handle this when enable
> AVIC.

s/this when enable AVIC/when AVIC is enabled/

> 
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> ---
>  xen/arch/x86/hvm/svm/avic.c        | 31 +++++++++++++++++++++++++++++++
>  xen/arch/x86/hvm/svm/intr.c        |  4 ++++
>  xen/arch/x86/hvm/svm/svm.c         | 12 ++++++++++--
>  xen/arch/x86/hvm/svm/vmcb.c        |  6 +++++-
>  xen/include/asm-x86/hvm/svm/avic.h |  1 +
>  5 files changed, 51 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/svm/avic.c b/xen/arch/x86/hvm/svm/avic.c
> index cd8a9d4..4144223 100644
> --- a/xen/arch/x86/hvm/svm/avic.c
> +++ b/xen/arch/x86/hvm/svm/avic.c
> @@ -576,3 +576,34 @@ void svm_avic_vmexit_do_noaccel(struct cpu_user_regs *regs)
>  
>      return;
>  }
> +
> +/***************************************************************

Twinkle twinkle little stars, there are too many of you..

> + * AVIC INTR INJECTION

Also, the comment could be deleted, as the code explains the flow pretty well on its own.
> + */
> +void svm_avic_deliver_posted_intr(struct vcpu *v, u8 vec)
> +{
> +    struct vlapic *vlapic = vcpu_vlapic(v);
> +
> +    /* Fallback to use non-AVIC if vcpu is not enabled with AVIC */

Please add a period at the end.

> +    if ( !svm_avic_vcpu_enabled(v) )
> +    {
> +        if ( !vlapic_test_and_set_vector(vec, &vlapic->regs->data[APIC_IRR]) )
> +            vcpu_kick(v);
> +        return;
> +    }
> +
> +    if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
> +        return;
> +
> +    if ( vlapic_test_and_set_vector(vec, &vlapic->regs->data[APIC_IRR]) )
> +        return;
> +
> +    /*
> +     * If vcpu is running on another cpu, hit the doorbell to signal
> +     * it to process interrupt. Otherwise, kick it.
> +     */
> +    if ( v->is_running && (v != current) )
> +        wrmsrl(AVIC_DOORBELL, cpu_data[v->processor].apicid);

What is the consequence if, say, the destination CPU ends up switching to
a different guest and the doorbell hits at that point?

If the different guest is not AVIC enabled, what then? Does the CPU ignore
it? Say the CPU is running at that point without a VMCB (it is running
dom0), or with an HVM guest without AVIC. Will we get an "AVIC IPI delivery
not completed" #VMEXIT on the destination CPU? (Or on this one?)

And what if this new guest is AVIC enabled and there are no IRR bits set in
its backing page? [I presume nothing will happen.]

> +    else
> +        vcpu_kick(v);
> +}
Jan Beulich Dec. 22, 2016, 11:36 a.m. UTC | #2
>>> On 19.09.16 at 07:52, <suravee.suthikulpanit@amd.com> wrote:
> Also, the AVIC hardware automatically synchronizes TPR and CR8/vTPR, when
> values are updated. Therefore, xen does not need to handle this when enable
> AVIC.

I'm having trouble matching this up with ...

> --- a/xen/arch/x86/hvm/svm/vmcb.c
> +++ b/xen/arch/x86/hvm/svm/vmcb.c
> @@ -93,10 +93,14 @@ static int construct_vmcb(struct vcpu *v)
>      vmcb->_dr_intercepts = ~0u;
>  
>      /* Intercept all control-register accesses except for CR2 and CR8. */
> -    vmcb->_cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
> +    if ( !svm_avic_vcpu_enabled(v) )
> +        vmcb->_cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
>                               CR_INTERCEPT_CR2_WRITE |
>                               CR_INTERCEPT_CR8_READ |
>                               CR_INTERCEPT_CR8_WRITE);
> +    else
> +        vmcb->_cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
> +                             CR_INTERCEPT_CR2_WRITE );

... this change, enabling CR8 intercepts in AVIC mode.

Jan
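
To make the inconsistency concrete: if the commit message is taken at face value (the hardware keeps TPR and CR8/vTPR in sync, so Xen need not intercept CR8), the construct_vmcb() hunk would arguably not be needed at all, and the pre-patch code would serve both cases. The following is only a sketch of what consistency with the commit message would suggest, not what the patch actually does:

    /* Hypothetical variant, NOT the patch: keep CR8 unintercepted whether or
     * not AVIC is enabled, relying on the hardware TPR/CR8 synchronisation
     * described in the commit message. */
    vmcb->_cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
                             CR_INTERCEPT_CR2_WRITE |
                             CR_INTERCEPT_CR8_READ |
                             CR_INTERCEPT_CR8_WRITE);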

Patch

diff --git a/xen/arch/x86/hvm/svm/avic.c b/xen/arch/x86/hvm/svm/avic.c
index cd8a9d4..4144223 100644
--- a/xen/arch/x86/hvm/svm/avic.c
+++ b/xen/arch/x86/hvm/svm/avic.c
@@ -576,3 +576,34 @@  void svm_avic_vmexit_do_noaccel(struct cpu_user_regs *regs)
 
     return;
 }
+
+/***************************************************************
+ * AVIC INTR INJECTION
+ */
+void svm_avic_deliver_posted_intr(struct vcpu *v, u8 vec)
+{
+    struct vlapic *vlapic = vcpu_vlapic(v);
+
+    /* Fallback to use non-AVIC if vcpu is not enabled with AVIC */
+    if ( !svm_avic_vcpu_enabled(v) )
+    {
+        if ( !vlapic_test_and_set_vector(vec, &vlapic->regs->data[APIC_IRR]) )
+            vcpu_kick(v);
+        return;
+    }
+
+    if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
+        return;
+
+    if ( vlapic_test_and_set_vector(vec, &vlapic->regs->data[APIC_IRR]) )
+        return;
+
+    /*
+     * If vcpu is running on another cpu, hit the doorbell to signal
+     * it to process interrupt. Otherwise, kick it.
+     */
+    if ( v->is_running && (v != current) )
+        wrmsrl(AVIC_DOORBELL, cpu_data[v->processor].apicid);
+    else
+        vcpu_kick(v);
+}
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index bd94731..876d2ad 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -30,6 +30,7 @@ 
 #include <asm/hvm/io.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/vlapic.h>
+#include <asm/hvm/svm/avic.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/svm/intr.h>
 #include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
@@ -101,6 +102,9 @@  static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
     HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                 vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1);
 
+    if ( svm_avic_vcpu_enabled(v) )
+        return;
+
     /*
      * Create a dummy virtual interrupt to intercept as soon as the
      * guest can accept the real interrupt.
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index aafbfa1..caf9984 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1091,7 +1091,8 @@  static void noreturn svm_do_resume(struct vcpu *v)
         hvm_asid_flush_vcpu(v);
     }
 
-    if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) )
+    if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) &&
+         !svm_avic_vcpu_enabled(v) )
     {
         vintr_t intr;
 
@@ -2337,7 +2338,8 @@  void svm_vmexit_handler(struct cpu_user_regs *regs)
      * NB. We need to preserve the low bits of the TPR to make checked builds
      * of Windows work, even though they don't actually do anything.
      */
-    if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) )
+    if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) &&
+         !svm_avic_vcpu_enabled(v) )
     {
         intr = vmcb_get_vintr(vmcb);
         vlapic_set_reg(vlapic, APIC_TASKPRI,
@@ -2530,6 +2532,12 @@  void svm_vmexit_handler(struct cpu_user_regs *regs)
         u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
         intr = vmcb_get_vintr(vmcb);
 
+        if ( svm_avic_vcpu_enabled(v) )
+        {
+            gdprintk(XENLOG_ERR, "AVIC VINTR:\n");
+            domain_crash(v->domain);
+        }
+
         intr.fields.irq = 0;
         general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
 
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 9ee7fc7..9ea7627 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -93,10 +93,14 @@  static int construct_vmcb(struct vcpu *v)
     vmcb->_dr_intercepts = ~0u;
 
     /* Intercept all control-register accesses except for CR2 and CR8. */
-    vmcb->_cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
+    if ( !svm_avic_vcpu_enabled(v) )
+        vmcb->_cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
                              CR_INTERCEPT_CR2_WRITE |
                              CR_INTERCEPT_CR8_READ |
                              CR_INTERCEPT_CR8_WRITE);
+    else
+        vmcb->_cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
+                             CR_INTERCEPT_CR2_WRITE );
 
     /* I/O and MSR permission bitmaps. */
     arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE), 0);
diff --git a/xen/include/asm-x86/hvm/svm/avic.h b/xen/include/asm-x86/hvm/svm/avic.h
index 2c501d4..e1eb66c 100644
--- a/xen/include/asm-x86/hvm/svm/avic.h
+++ b/xen/include/asm-x86/hvm/svm/avic.h
@@ -40,4 +40,5 @@  int svm_avic_init_vmcb(struct vcpu *v);
 void svm_avic_vmexit_do_incomp_ipi(struct cpu_user_regs *regs);
 void svm_avic_vmexit_do_noaccel(struct cpu_user_regs *regs);
 
+void svm_avic_deliver_posted_intr(struct vcpu *v, u8 vector);
 #endif /* _SVM_AVIC_H_ */
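
Note that nothing in this patch actually calls svm_avic_deliver_posted_intr(); presumably it gets wired up elsewhere in the series, most naturally via the deliver_posted_intr hook in SVM's hvm_function_table, the same hook VMX uses for its posted interrupts. A hedged sketch of what that hookup might look like (the svm_avic flag and the svm_function_table/start_svm() names are assumptions based on the existing svm.c layout, not something shown in this patch):

    /* Hypothetical wiring, not part of this patch. */
    const struct hvm_function_table * __init start_svm(void)
    {
        /* ... existing SVM setup ... */

        if ( svm_avic )
            svm_function_table.deliver_posted_intr = svm_avic_deliver_posted_intr;

        return &svm_function_table;
    }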