[RFC,1/9] x86/HVM: Introduce struct hvm_pi_ops

Message ID 1474264368-4104-2-git-send-email-suravee.suthikulpanit@amd.com (mailing list archive)
State New, archived

Commit Message

Suravee Suthikulpanit Sept. 19, 2016, 5:52 a.m. UTC
The current function pointers for managing HVM posted interrupts
can also be used by SVM AVIC. Therefore, this patch introduces
struct hvm_pi_ops in struct hvm_domain to hold them.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 xen/arch/x86/hvm/vmx/vmx.c         | 32 +++++++++----------
 xen/include/asm-x86/hvm/domain.h   | 63 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h      |  4 +--
 xen/include/asm-x86/hvm/vmx/vmcs.h | 59 -----------------------------------
 4 files changed, 81 insertions(+), 77 deletions(-)
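
For context, the point of moving these hooks into a vendor-neutral
structure is that a later SVM AVIC patch can install its own
implementations without touching VMX code. A minimal sketch of what that
could look like, mirroring vmx_pi_hooks_assign() below (svm_pi_hooks_assign
and the svm_avic_* callbacks are hypothetical names, not part of this
series):

    /* Hypothetical sketch: an AVIC counterpart of vmx_pi_hooks_assign().
     * The svm_avic_* callbacks are assumed names; only the pi_ops
     * container itself is introduced by this patch. */
    void svm_pi_hooks_assign(struct domain *d)
    {
        if ( !has_hvm_container_domain(d) )
            return;

        ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);

        d->arch.hvm_domain.pi_ops.vcpu_block     = svm_avic_vcpu_block;
        d->arch.hvm_domain.pi_ops.pi_switch_from = svm_avic_ctxt_switch_from;
        d->arch.hvm_domain.pi_ops.pi_switch_to   = svm_avic_ctxt_switch_to;
        d->arch.hvm_domain.pi_ops.pi_do_resume   = svm_avic_do_resume;
    }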

Comments

Konrad Rzeszutek Wilk Oct. 12, 2016, 5:01 p.m. UTC | #1
On Mon, Sep 19, 2016 at 12:52:40AM -0500, Suravee Suthikulpanit wrote:
> The current function pointers for managing HVM posted interrupts
> can also be used by SVM AVIC. Therefore, this patch introduces
> struct hvm_pi_ops in struct hvm_domain to hold them.
> 
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>

You seem to have forgotten to CC the VMX maintainers:

INTEL(R) VT FOR X86 (VT-X)                                                         
M:  Jun Nakajima <jun.nakajima@intel.com>                                          
M:  Kevin Tian <kevin.tian@intel.com>                                              
S:  Supported                                           
?

Regardless of that, Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> ---
>  xen/arch/x86/hvm/vmx/vmx.c         | 32 +++++++++----------
>  xen/include/asm-x86/hvm/domain.h   | 63 ++++++++++++++++++++++++++++++++++++++
>  xen/include/asm-x86/hvm/hvm.h      |  4 +--
>  xen/include/asm-x86/hvm/vmx/vmcs.h | 59 -----------------------------------
>  4 files changed, 81 insertions(+), 77 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 2759e6f..8620697 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -204,12 +204,12 @@ void vmx_pi_hooks_assign(struct domain *d)
>      if ( !iommu_intpost || !has_hvm_container_domain(d) )
>          return;
>  
> -    ASSERT(!d->arch.hvm_domain.vmx.vcpu_block);
> +    ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);
>  
> -    d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
> -    d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
> -    d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
> -    d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
> +    d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
> +    d->arch.hvm_domain.pi_ops.pi_switch_from = vmx_pi_switch_from;
> +    d->arch.hvm_domain.pi_ops.pi_switch_to = vmx_pi_switch_to;
> +    d->arch.hvm_domain.pi_ops.pi_do_resume = vmx_pi_do_resume;
>  }
>  
>  /* This function is called when pcidevs_lock is held */
> @@ -218,12 +218,12 @@ void vmx_pi_hooks_deassign(struct domain *d)
>      if ( !iommu_intpost || !has_hvm_container_domain(d) )
>          return;
>  
> -    ASSERT(d->arch.hvm_domain.vmx.vcpu_block);
> +    ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);
>  
> -    d->arch.hvm_domain.vmx.vcpu_block = NULL;
> -    d->arch.hvm_domain.vmx.pi_switch_from = NULL;
> -    d->arch.hvm_domain.vmx.pi_switch_to = NULL;
> -    d->arch.hvm_domain.vmx.pi_do_resume = NULL;
> +    d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
> +    d->arch.hvm_domain.pi_ops.pi_switch_from = NULL;
> +    d->arch.hvm_domain.pi_ops.pi_switch_to = NULL;
> +    d->arch.hvm_domain.pi_ops.pi_do_resume = NULL;
>  }
>  
>  static int vmx_domain_initialise(struct domain *d)
> @@ -901,8 +901,8 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
>      vmx_restore_host_msrs();
>      vmx_save_dr(v);
>  
> -    if ( v->domain->arch.hvm_domain.vmx.pi_switch_from )
> -        v->domain->arch.hvm_domain.vmx.pi_switch_from(v);
> +    if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_from )
> +        v->domain->arch.hvm_domain.pi_ops.pi_switch_from(v);
>  }
>  
>  static void vmx_ctxt_switch_to(struct vcpu *v)
> @@ -916,8 +916,8 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
>      vmx_restore_guest_msrs(v);
>      vmx_restore_dr(v);
>  
> -    if ( v->domain->arch.hvm_domain.vmx.pi_switch_to )
> -        v->domain->arch.hvm_domain.vmx.pi_switch_to(v);
> +    if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_to )
> +        v->domain->arch.hvm_domain.pi_ops.pi_switch_to(v);
>  }
>  
>  
> @@ -3914,8 +3914,8 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
>      struct hvm_vcpu_asid *p_asid;
>      bool_t need_flush;
>  
> -    if ( curr->domain->arch.hvm_domain.vmx.pi_do_resume )
> -        curr->domain->arch.hvm_domain.vmx.pi_do_resume(curr);
> +    if ( curr->domain->arch.hvm_domain.pi_ops.pi_do_resume )
> +        curr->domain->arch.hvm_domain.pi_ops.pi_do_resume(curr);
>  
>      if ( !cpu_has_vmx_vpid )
>          goto out;
> diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
> index f34d784..779927b 100644
> --- a/xen/include/asm-x86/hvm/domain.h
> +++ b/xen/include/asm-x86/hvm/domain.h
> @@ -72,6 +72,67 @@ struct hvm_ioreq_server {
>      bool_t                 bufioreq_atomic;
>  };
>  
> +struct hvm_pi_ops {
> +    /*
> +     * To handle posted interrupts correctly, we need to set the following
> +     * state:
> +     *
> +     * * The PI notification vector (NV)
> +     * * The PI notification destination processor (NDST)
> +     * * The PI "suppress notification" bit (SN)
> +     * * The vcpu pi "blocked" list
> +     *
> +     * If a VM is currently running, we want the PI delivered to the guest vcpu
> +     * on the proper pcpu (NDST = v->processor, SN clear).
> +     *
> +     * If the vm is blocked, we want the PI delivered to Xen so that it can
> +     * wake it up  (SN clear, NV = pi_wakeup_vector, vcpu on block list).
> +     *
> +     * If the VM is currently either preempted or offline (i.e., not running
> +     * because of some reason other than blocking waiting for an interrupt),
> +     * there's nothing Xen can do -- we want the interrupt pending bit set in
> +     * the guest, but we don't want to bother Xen with an interrupt (SN clear).
> +     *
> +     * There's a brief window of time between vmx_intr_assist() and checking
> +     * softirqs where if an interrupt comes in it may be lost; so we need Xen
> +     * to get an interrupt and raise a softirq so that it will go through the
> +     * vmx_intr_assist() path again (SN clear, NV = posted_interrupt).
> +     *
> +     * The way we implement this now is by looking at what needs to happen on
> +     * the following runstate transitions:
> +     *
> +     * A: runnable -> running
> +     *  - SN = 0
> +     *  - NDST = v->processor
> +     * B: running -> runnable
> +     *  - SN = 1
> +     * C: running -> blocked
> +     *  - NV = pi_wakeup_vector
> +     *  - Add vcpu to blocked list
> +     * D: blocked -> runnable
> +     *  - NV = posted_intr_vector
> +     *  - Take vcpu off blocked list
> +     *
> +     * For transitions A and B, we add hooks into vmx_ctxt_switch_{from,to}
> +     * paths.
> +     *
> +     * For transition C, we add a new arch hook, arch_vcpu_block(), which is
> +     * called from vcpu_block() and vcpu_do_poll().
> +     *
> +     * For transition D, rather than add an extra arch hook on vcpu_wake, we
> +     * add a hook on the vmentry path which checks to see if either of the two
> +     * actions need to be taken.
> +     *
> +     * These hooks only need to be called when the domain in question actually
> +     * has a physical device assigned to it, so we set and clear the callbacks
> +     * as appropriate when device assignment changes.
> +     */
> +    void (*vcpu_block) (struct vcpu *);
> +    void (*pi_switch_from) (struct vcpu *v);
> +    void (*pi_switch_to) (struct vcpu *v);
> +    void (*pi_do_resume) (struct vcpu *v);
> +};
> +
>  struct hvm_domain {
>      /* Guest page range used for non-default ioreq servers */
>      struct {
> @@ -148,6 +209,8 @@ struct hvm_domain {
>          struct list_head list;
>      } write_map;
>  
> +    struct hvm_pi_ops pi_ops;
> +
>      union {
>          struct vmx_domain vmx;
>          struct svm_domain svm;
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 81b60d5..c832d9a 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -621,8 +621,8 @@ unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v, bool_t restore);
>      struct vcpu *v_ = (v);                                      \
>      struct domain *d_ = v_->domain;                             \
>      if ( has_hvm_container_domain(d_) &&                        \
> -         (cpu_has_vmx && d_->arch.hvm_domain.vmx.vcpu_block) )  \
> -        d_->arch.hvm_domain.vmx.vcpu_block(v_);                 \
> +         (d_->arch.hvm_domain.pi_ops.vcpu_block) )          \
> +        d_->arch.hvm_domain.pi_ops.vcpu_block(v_);          \
>  })
>  
>  #endif /* __ASM_X86_HVM_HVM_H__ */
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index 997f4f5..4ec8b08 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -77,65 +77,6 @@ struct vmx_domain {
>      unsigned long apic_access_mfn;
>      /* VMX_DOMAIN_* */
>      unsigned int status;
> -
> -    /*
> -     * To handle posted interrupts correctly, we need to set the following
> -     * state:
> -     *
> -     * * The PI notification vector (NV)
> -     * * The PI notification destination processor (NDST)
> -     * * The PI "suppress notification" bit (SN)
> -     * * The vcpu pi "blocked" list
> -     *
> -     * If a VM is currently running, we want the PI delivered to the guest vcpu
> -     * on the proper pcpu (NDST = v->processor, SN clear).
> -     *
> -     * If the vm is blocked, we want the PI delivered to Xen so that it can
> -     * wake it up  (SN clear, NV = pi_wakeup_vector, vcpu on block list).
> -     *
> -     * If the VM is currently either preempted or offline (i.e., not running
> -     * because of some reason other than blocking waiting for an interrupt),
> -     * there's nothing Xen can do -- we want the interrupt pending bit set in
> -     * the guest, but we don't want to bother Xen with an interrupt (SN clear).
> -     *
> -     * There's a brief window of time between vmx_intr_assist() and checking
> -     * softirqs where if an interrupt comes in it may be lost; so we need Xen
> -     * to get an interrupt and raise a softirq so that it will go through the
> -     * vmx_intr_assist() path again (SN clear, NV = posted_interrupt).
> -     *
> -     * The way we implement this now is by looking at what needs to happen on
> -     * the following runstate transitions:
> -     *
> -     * A: runnable -> running
> -     *  - SN = 0
> -     *  - NDST = v->processor
> -     * B: running -> runnable
> -     *  - SN = 1
> -     * C: running -> blocked
> -     *  - NV = pi_wakeup_vector
> -     *  - Add vcpu to blocked list
> -     * D: blocked -> runnable
> -     *  - NV = posted_intr_vector
> -     *  - Take vcpu off blocked list
> -     *
> -     * For transitions A and B, we add hooks into vmx_ctxt_switch_{from,to}
> -     * paths.
> -     *
> -     * For transition C, we add a new arch hook, arch_vcpu_block(), which is
> -     * called from vcpu_block() and vcpu_do_poll().
> -     *
> -     * For transition D, rather than add an extra arch hook on vcpu_wake, we
> -     * add a hook on the vmentry path which checks to see if either of the two
> -     * actions need to be taken.
> -     *
> -     * These hooks only need to be called when the domain in question actually
> -     * has a physical device assigned to it, so we set and clear the callbacks
> -     * as appropriate when device assignment changes.
> -     */
> -    void (*vcpu_block) (struct vcpu *);
> -    void (*pi_switch_from) (struct vcpu *v);
> -    void (*pi_switch_to) (struct vcpu *v);
> -    void (*pi_do_resume) (struct vcpu *v);
>  };
>  
>  struct pi_desc {
> -- 
> 1.9.1

Patch

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2759e6f..8620697 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -204,12 +204,12 @@ void vmx_pi_hooks_assign(struct domain *d)
     if ( !iommu_intpost || !has_hvm_container_domain(d) )
         return;
 
-    ASSERT(!d->arch.hvm_domain.vmx.vcpu_block);
+    ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);
 
-    d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
-    d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
-    d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
-    d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
+    d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
+    d->arch.hvm_domain.pi_ops.pi_switch_from = vmx_pi_switch_from;
+    d->arch.hvm_domain.pi_ops.pi_switch_to = vmx_pi_switch_to;
+    d->arch.hvm_domain.pi_ops.pi_do_resume = vmx_pi_do_resume;
 }
 
 /* This function is called when pcidevs_lock is held */
@@ -218,12 +218,12 @@ void vmx_pi_hooks_deassign(struct domain *d)
     if ( !iommu_intpost || !has_hvm_container_domain(d) )
         return;
 
-    ASSERT(d->arch.hvm_domain.vmx.vcpu_block);
+    ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);
 
-    d->arch.hvm_domain.vmx.vcpu_block = NULL;
-    d->arch.hvm_domain.vmx.pi_switch_from = NULL;
-    d->arch.hvm_domain.vmx.pi_switch_to = NULL;
-    d->arch.hvm_domain.vmx.pi_do_resume = NULL;
+    d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
+    d->arch.hvm_domain.pi_ops.pi_switch_from = NULL;
+    d->arch.hvm_domain.pi_ops.pi_switch_to = NULL;
+    d->arch.hvm_domain.pi_ops.pi_do_resume = NULL;
 }
 
 static int vmx_domain_initialise(struct domain *d)
@@ -901,8 +901,8 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
     vmx_restore_host_msrs();
     vmx_save_dr(v);
 
-    if ( v->domain->arch.hvm_domain.vmx.pi_switch_from )
-        v->domain->arch.hvm_domain.vmx.pi_switch_from(v);
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_from )
+        v->domain->arch.hvm_domain.pi_ops.pi_switch_from(v);
 }
 
 static void vmx_ctxt_switch_to(struct vcpu *v)
@@ -916,8 +916,8 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
     vmx_restore_guest_msrs(v);
     vmx_restore_dr(v);
 
-    if ( v->domain->arch.hvm_domain.vmx.pi_switch_to )
-        v->domain->arch.hvm_domain.vmx.pi_switch_to(v);
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_to )
+        v->domain->arch.hvm_domain.pi_ops.pi_switch_to(v);
 }
 
 
@@ -3914,8 +3914,8 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
     struct hvm_vcpu_asid *p_asid;
     bool_t need_flush;
 
-    if ( curr->domain->arch.hvm_domain.vmx.pi_do_resume )
-        curr->domain->arch.hvm_domain.vmx.pi_do_resume(curr);
+    if ( curr->domain->arch.hvm_domain.pi_ops.pi_do_resume )
+        curr->domain->arch.hvm_domain.pi_ops.pi_do_resume(curr);
 
     if ( !cpu_has_vmx_vpid )
         goto out;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index f34d784..779927b 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -72,6 +72,67 @@ struct hvm_ioreq_server {
     bool_t                 bufioreq_atomic;
 };
 
+struct hvm_pi_ops {
+    /*
+     * To handle posted interrupts correctly, we need to set the following
+     * state:
+     *
+     * * The PI notification vector (NV)
+     * * The PI notification destination processor (NDST)
+     * * The PI "suppress notification" bit (SN)
+     * * The vcpu pi "blocked" list
+     *
+     * If a VM is currently running, we want the PI delivered to the guest vcpu
+     * on the proper pcpu (NDST = v->processor, SN clear).
+     *
+     * If the vm is blocked, we want the PI delivered to Xen so that it can
+     * wake it up  (SN clear, NV = pi_wakeup_vector, vcpu on block list).
+     *
+     * If the VM is currently either preempted or offline (i.e., not running
+     * because of some reason other than blocking waiting for an interrupt),
+     * there's nothing Xen can do -- we want the interrupt pending bit set in
+     * the guest, but we don't want to bother Xen with an interrupt (SN clear).
+     *
+     * There's a brief window of time between vmx_intr_assist() and checking
+     * softirqs where if an interrupt comes in it may be lost; so we need Xen
+     * to get an interrupt and raise a softirq so that it will go through the
+     * vmx_intr_assist() path again (SN clear, NV = posted_interrupt).
+     *
+     * The way we implement this now is by looking at what needs to happen on
+     * the following runstate transitions:
+     *
+     * A: runnable -> running
+     *  - SN = 0
+     *  - NDST = v->processor
+     * B: running -> runnable
+     *  - SN = 1
+     * C: running -> blocked
+     *  - NV = pi_wakeup_vector
+     *  - Add vcpu to blocked list
+     * D: blocked -> runnable
+     *  - NV = posted_intr_vector
+     *  - Take vcpu off blocked list
+     *
+     * For transitions A and B, we add hooks into vmx_ctxt_switch_{from,to}
+     * paths.
+     *
+     * For transition C, we add a new arch hook, arch_vcpu_block(), which is
+     * called from vcpu_block() and vcpu_do_poll().
+     *
+     * For transition D, rather than add an extra arch hook on vcpu_wake, we
+     * add a hook on the vmentry path which checks to see if either of the two
+     * actions need to be taken.
+     *
+     * These hooks only need to be called when the domain in question actually
+     * has a physical device assigned to it, so we set and clear the callbacks
+     * as appropriate when device assignment changes.
+     */
+    void (*vcpu_block) (struct vcpu *);
+    void (*pi_switch_from) (struct vcpu *v);
+    void (*pi_switch_to) (struct vcpu *v);
+    void (*pi_do_resume) (struct vcpu *v);
+};
+
 struct hvm_domain {
     /* Guest page range used for non-default ioreq servers */
     struct {
@@ -148,6 +209,8 @@ struct hvm_domain {
         struct list_head list;
     } write_map;
 
+    struct hvm_pi_ops pi_ops;
+
     union {
         struct vmx_domain vmx;
         struct svm_domain svm;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 81b60d5..c832d9a 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -621,8 +621,8 @@ unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v, bool_t restore);
     struct vcpu *v_ = (v);                                      \
     struct domain *d_ = v_->domain;                             \
     if ( has_hvm_container_domain(d_) &&                        \
-         (cpu_has_vmx && d_->arch.hvm_domain.vmx.vcpu_block) )  \
-        d_->arch.hvm_domain.vmx.vcpu_block(v_);                 \
+         (d_->arch.hvm_domain.pi_ops.vcpu_block) )          \
+        d_->arch.hvm_domain.pi_ops.vcpu_block(v_);          \
 })
 
 #endif /* __ASM_X86_HVM_HVM_H__ */
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 997f4f5..4ec8b08 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -77,65 +77,6 @@ struct vmx_domain {
     unsigned long apic_access_mfn;
     /* VMX_DOMAIN_* */
     unsigned int status;
-
-    /*
-     * To handle posted interrupts correctly, we need to set the following
-     * state:
-     *
-     * * The PI notification vector (NV)
-     * * The PI notification destination processor (NDST)
-     * * The PI "suppress notification" bit (SN)
-     * * The vcpu pi "blocked" list
-     *
-     * If a VM is currently running, we want the PI delivered to the guest vcpu
-     * on the proper pcpu (NDST = v->processor, SN clear).
-     *
-     * If the vm is blocked, we want the PI delivered to Xen so that it can
-     * wake it up  (SN clear, NV = pi_wakeup_vector, vcpu on block list).
-     *
-     * If the VM is currently either preempted or offline (i.e., not running
-     * because of some reason other than blocking waiting for an interrupt),
-     * there's nothing Xen can do -- we want the interrupt pending bit set in
-     * the guest, but we don't want to bother Xen with an interrupt (SN clear).
-     *
-     * There's a brief window of time between vmx_intr_assist() and checking
-     * softirqs where if an interrupt comes in it may be lost; so we need Xen
-     * to get an interrupt and raise a softirq so that it will go through the
-     * vmx_intr_assist() path again (SN clear, NV = posted_interrupt).
-     *
-     * The way we implement this now is by looking at what needs to happen on
-     * the following runstate transitions:
-     *
-     * A: runnable -> running
-     *  - SN = 0
-     *  - NDST = v->processor
-     * B: running -> runnable
-     *  - SN = 1
-     * C: running -> blocked
-     *  - NV = pi_wakeup_vector
-     *  - Add vcpu to blocked list
-     * D: blocked -> runnable
-     *  - NV = posted_intr_vector
-     *  - Take vcpu off blocked list
-     *
-     * For transitions A and B, we add hooks into vmx_ctxt_switch_{from,to}
-     * paths.
-     *
-     * For transition C, we add a new arch hook, arch_vcpu_block(), which is
-     * called from vcpu_block() and vcpu_do_poll().
-     *
-     * For transition D, rather than add an extra arch hook on vcpu_wake, we
-     * add a hook on the vmentry path which checks to see if either of the two
-     * actions need to be taken.
-     *
-     * These hooks only need to be called when the domain in question actually
-     * has a physical device assigned to it, so we set and clear the callbacks
-     * as appropriate when device assignment changes.
-     */
-    void (*vcpu_block) (struct vcpu *);
-    void (*pi_switch_from) (struct vcpu *v);
-    void (*pi_switch_to) (struct vcpu *v);
-    void (*pi_do_resume) (struct vcpu *v);
 };
 
 struct pi_desc {
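
Since the hooks are installed only while a passed-through device is
assigned (vmx_pi_hooks_assign/deassign above), every call site guards the
indirect call with a NULL check, as seen in vmx_ctxt_switch_{from,to} and
vmx_vmenter_helper. A minimal sketch of that pattern as a helper
(hvm_pi_do_resume is a hypothetical wrapper name, not something this
patch introduces):

    /* Hypothetical wrapper showing the guarded-call pattern used at each
     * call site: the pi_ops hooks stay NULL unless a device is assigned,
     * so callers must test before invoking. */
    static inline void hvm_pi_do_resume(struct vcpu *v)
    {
        if ( v->domain->arch.hvm_domain.pi_ops.pi_do_resume )
            v->domain->arch.hvm_domain.pi_ops.pi_do_resume(v);
    }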