[V4] x86/HVM: Introduce struct hvm_pi_ops (RESEND)

Message ID 8cedc7d9-662b-202c-ebd5-379be452487d@amd.com
State New, archived

Commit Message

Suravee Suthikulpanit Jan. 17, 2017, 2:22 a.m. UTC
From 2b8e159039e3e70414d49932e558b0d26b44be11 Mon Sep 17 00:00:00 2001
From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Date: Sat, 17 Sep 2016 01:19:49 -0500
Subject: [PATCH V4] x86/HVM: Introduce struct hvm_pi_ops

The function pointers currently kept in struct vmx_domain for managing HVM
posted interrupts can also be used by SVM AVIC. Therefore, this patch
introduces struct hvm_pi_ops in struct hvm_domain to hold them.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
---

RESENDING NOTE:
   * Sorry, I forgot to fix the stray blanks in
     the struct hvm_pi_ops declaration.

Changes in V4:
   * Re-word comments for the struct hvm_pi_ops.
   * Move the comment for VMX per Kevin's request.

  xen/arch/x86/hvm/vmx/vmx.c         | 75 ++++++++++++++++++++++++++++++--------
  xen/include/asm-x86/hvm/domain.h   | 29 +++++++++++++++
  xen/include/asm-x86/hvm/hvm.h      |  4 +-
  xen/include/asm-x86/hvm/vmx/vmcs.h | 59 ------------------------------
  4 files changed, 90 insertions(+), 77 deletions(-)
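
As an illustration of what the refactoring enables, here is a minimal
sketch of how an SVM AVIC implementation could later register its own
callbacks through the generic hooks, mirroring vmx_pi_hooks_assign() in
the patch below. The svm_avic_hooks_assign() and avic_* names are
placeholders for illustration, not code from this patch:

    /* Hypothetical AVIC counterpart to vmx_pi_hooks_assign(); all avic_*
     * callbacks are placeholder names, not functions from this patch. */
    void svm_avic_hooks_assign(struct domain *d)
    {
        d->arch.hvm_domain.pi_ops.vcpu_block  = avic_vcpu_block;
        d->arch.hvm_domain.pi_ops.switch_from = avic_switch_from;
        d->arch.hvm_domain.pi_ops.switch_to   = avic_switch_to;
        d->arch.hvm_domain.pi_ops.do_resume   = avic_do_resume;
    }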

Comments

Suravee Suthikulpanit Jan. 17, 2017, 2:30 a.m. UTC | #1
GRR... sorry again for the confusion. Sending error.
Please ignore V4. I'll send out V5 instead.

Suravee

Patch

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 7b2c50c..3db4ed9 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -198,18 +198,61 @@  static void vmx_pi_do_resume(struct vcpu *v)
      spin_unlock_irqrestore(pi_blocking_list_lock, flags);
  }

+/*
+ * To handle posted interrupts correctly, we need to set the following
+ * state:
+ *
+ * * The PI notification vector (NV)
+ * * The PI notification destination processor (NDST)
+ * * The PI "suppress notification" bit (SN)
+ * * The vcpu pi "blocked" list
+ *
+ * VMX implements the runstate transitions as follows:
+ *
+ * A: ... -> running
+ *  - SN = 0
+ *  - NDST = v->processor
+ *  If a VM is currently running, we want the PI delivered to the guest vcpu
+ *  on the proper pcpu.
+ *
+ * B: running -> ...
+ *  - SN = 1
+ *
+ * C: ... -> blocked
+ *  - SN = 0
+ *  - NV = pi_wakeup_vector
+ *  - Add vcpu to blocked list
+ *  If the VM is blocked, we want the PI delivered to Xen so that it can
+ *  wake it up.
+ *
+ * D: ... -> vmentry
+ *  - SN = 0
+ *  - NV = posted_intr_vector
+ *  - Take vcpu off blocked list
+ *
+ *  If the VM is currently either preempted or offline (i.e., not running
+ *  because of some reason other than blocking waiting for an interrupt),
+ *  there's nothing Xen can do -- we want the interrupt pending bit set in
+ *  the guest, but we don't want to bother Xen with an interrupt (SN clear).
+ *
+ * There's a brief window of time between vmx_intr_assist() and checking
+ * softirqs where if an interrupt comes in it may be lost; so we need Xen
+ * to get an interrupt and raise a softirq so that it will go through the
+ * vmx_intr_assist() path again (SN clear, NV = posted_interrupt).
+ */
+
  /* This function is called when pcidevs_lock is held */
  void vmx_pi_hooks_assign(struct domain *d)
  {
      if ( !iommu_intpost || !has_hvm_container_domain(d) )
          return;

-    ASSERT(!d->arch.hvm_domain.vmx.vcpu_block);
+    ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);

-    d->arch.hvm_domain.vmx.vcpu_block = vmx_vcpu_block;
-    d->arch.hvm_domain.vmx.pi_switch_from = vmx_pi_switch_from;
-    d->arch.hvm_domain.vmx.pi_switch_to = vmx_pi_switch_to;
-    d->arch.hvm_domain.vmx.pi_do_resume = vmx_pi_do_resume;
+    d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
+    d->arch.hvm_domain.pi_ops.switch_from = vmx_pi_switch_from;
+    d->arch.hvm_domain.pi_ops.switch_to = vmx_pi_switch_to;
+    d->arch.hvm_domain.pi_ops.do_resume = vmx_pi_do_resume;
  }

  /* This function is called when pcidevs_lock is held */
@@ -218,12 +261,12 @@  void vmx_pi_hooks_deassign(struct domain *d)
      if ( !iommu_intpost || !has_hvm_container_domain(d) )
          return;

-    ASSERT(d->arch.hvm_domain.vmx.vcpu_block);
+    ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);

-    d->arch.hvm_domain.vmx.vcpu_block = NULL;
-    d->arch.hvm_domain.vmx.pi_switch_from = NULL;
-    d->arch.hvm_domain.vmx.pi_switch_to = NULL;
-    d->arch.hvm_domain.vmx.pi_do_resume = NULL;
+    d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
+    d->arch.hvm_domain.pi_ops.switch_from = NULL;
+    d->arch.hvm_domain.pi_ops.switch_to = NULL;
+    d->arch.hvm_domain.pi_ops.do_resume = NULL;
  }

  static int vmx_domain_initialise(struct domain *d)
@@ -901,8 +944,8 @@  static void vmx_ctxt_switch_from(struct vcpu *v)
      vmx_restore_host_msrs();
      vmx_save_dr(v);

-    if ( v->domain->arch.hvm_domain.vmx.pi_switch_from )
-        v->domain->arch.hvm_domain.vmx.pi_switch_from(v);
+    if ( v->domain->arch.hvm_domain.pi_ops.switch_from )
+        v->domain->arch.hvm_domain.pi_ops.switch_from(v);
  }

  static void vmx_ctxt_switch_to(struct vcpu *v)
@@ -916,8 +959,8 @@  static void vmx_ctxt_switch_to(struct vcpu *v)
      vmx_restore_guest_msrs(v);
      vmx_restore_dr(v);

-    if ( v->domain->arch.hvm_domain.vmx.pi_switch_to )
-        v->domain->arch.hvm_domain.vmx.pi_switch_to(v);
+    if ( v->domain->arch.hvm_domain.pi_ops.switch_to )
+        v->domain->arch.hvm_domain.pi_ops.switch_to(v);
  }


@@ -3963,8 +4006,8 @@  void vmx_vmenter_helper(const struct cpu_user_regs *regs)
      struct hvm_vcpu_asid *p_asid;
      bool_t need_flush;

-    if ( curr->domain->arch.hvm_domain.vmx.pi_do_resume )
-        curr->domain->arch.hvm_domain.vmx.pi_do_resume(curr);
+    if ( curr->domain->arch.hvm_domain.pi_ops.do_resume )
+        curr->domain->arch.hvm_domain.pi_ops.do_resume(curr);

      if ( !cpu_has_vmx_vpid )
          goto out;
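
The runstate-transition comment added above maps directly onto updates of
the pi_desc fields. The sketch below illustrates that mapping; the
pi_set_sn()/pi_clear_sn() helpers, write_atomic(), and the two vectors all
exist in Xen's VMX code, but the function itself is illustrative and not
part of this patch (xAPIC vs. x2APIC formatting of NDST is omitted):

    /* Illustrative only: how transitions A-D translate to pi_desc updates. */
    static void pi_transitions_sketch(struct vcpu *v, struct pi_desc *pi)
    {
        /* A: ... -> running: deliver the PI to the guest on the new pcpu. */
        write_atomic(&pi->ndst, cpu_physical_id(v->processor));
        pi_clear_sn(pi);

        /* B: running -> ...: suppress notifications while descheduled. */
        pi_set_sn(pi);

        /* C: ... -> blocked: route notifications to Xen's wakeup vector
         * (and add the vcpu to the per-pcpu blocked list). */
        write_atomic(&pi->nv, pi_wakeup_vector);
        pi_clear_sn(pi);

        /* D: ... -> vmentry: restore direct delivery to the guest
         * (and take the vcpu off the blocked list). */
        write_atomic(&pi->nv, posted_intr_vector);
        pi_clear_sn(pi);
    }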
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index f34d784..de1f930 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -72,6 +72,33 @@  struct hvm_ioreq_server {
      bool_t                 bufioreq_atomic;
  };

+/*
+ * This structure defines function hooks to support hardware-assisted
+ * virtual interrupt delivery to a guest (e.g. VMX PI and SVM AVIC).
+ *
+ * These hooks are defined by the underlying arch-specific code
+ * as needed. For example:
+ *   - When the domain is enabled with virtual IPI delivery
+ *   - When the domain is enabled with virtual I/O interrupt delivery
+ *     and actually has a physical device assigned.
+ */
+struct hvm_pi_ops {
+    /* Hook into ctxt_switch_from. */
+    void (*switch_from) (struct vcpu *v);
+
+    /* Hook into ctxt_switch_to. */
+    void (*switch_to) (struct vcpu *v);
+
+    /*
+     * Hook into arch_vcpu_block(), which is called
+     *  from vcpu_block() and vcpu_do_poll().
+     */
+    void (*vcpu_block) (struct vcpu *);
+
+    /* Hook into the vmentry path. */
+    void (*do_resume) (struct vcpu *v);
+};
+
  struct hvm_domain {
      /* Guest page range used for non-default ioreq servers */
      struct {
@@ -148,6 +175,8 @@  struct hvm_domain {
          struct list_head list;
      } write_map;

+    struct hvm_pi_ops pi_ops;
+
      union {
          struct vmx_domain vmx;
          struct svm_domain svm;
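
With pi_ops embedded in struct hvm_domain, vendor-neutral code can invoke
a hook through a NULL-guarded indirect call without knowing whether VMX PI
or SVM AVIC is behind it. A minimal sketch of the pattern (an illustrative
wrapper, not code from this patch):

    /* The hooks are only non-NULL while the domain needs them (e.g. while
     * a physical device is assigned), so callers test before calling. */
    static inline void hvm_pi_do_resume(struct vcpu *v)
    {
        if ( v->domain->arch.hvm_domain.pi_ops.do_resume )
            v->domain->arch.hvm_domain.pi_ops.do_resume(v);
    }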
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 7e7462e..b1e4c75 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -638,8 +638,8 @@  unsigned long hvm_cr4_guest_reserved_bits(const struct vcpu *v, bool_t restore);
      struct vcpu *v_ = (v);                                      \
      struct domain *d_ = v_->domain;                             \
      if ( has_hvm_container_domain(d_) &&                        \
-         (cpu_has_vmx && d_->arch.hvm_domain.vmx.vcpu_block) )  \
-        d_->arch.hvm_domain.vmx.vcpu_block(v_);                 \
+         (d_->arch.hvm_domain.pi_ops.vcpu_block) )          \
+        d_->arch.hvm_domain.pi_ops.vcpu_block(v_);          \
  })

  #endif /* __ASM_X86_HVM_HVM_H__ */
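
The arch_vcpu_block() hook above is invoked from the common scheduler; a
simplified sketch of the caller (after vcpu_block() in
xen/common/schedule.c) shows why the hook runs before the event check, so
a posted interrupt arriving in the blocking window is still noticed:

    /* Simplified from xen/common/schedule.c; trace calls omitted. */
    void vcpu_block(void)
    {
        struct vcpu *v = current;

        set_bit(_VPF_blocked, &v->pause_flags);

        arch_vcpu_block(v);    /* may invoke pi_ops.vcpu_block */

        /* Check for events /after/ blocking: avoids wakeup waiting race. */
        if ( local_events_need_delivery() )
            clear_bit(_VPF_blocked, &v->pause_flags);
        else
            raise_softirq(SCHEDULE_SOFTIRQ);
    }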
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 997f4f5..4ec8b08 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -77,65 +77,6 @@  struct vmx_domain {
      unsigned long apic_access_mfn;
      /* VMX_DOMAIN_* */
      unsigned int status;
-
-    /*
-     * To handle posted interrupts correctly, we need to set the following
-     * state:
-     *
-     * * The PI notification vector (NV)
-     * * The PI notification destination processor (NDST)
-     * * The PI "suppress notification" bit (SN)
-     * * The vcpu pi "blocked" list
-     *
-     * If a VM is currently running, we want the PI delivered to the guest vcpu
-     * on the proper pcpu (NDST = v->processor, SN clear).
-     *
-     * If the vm is blocked, we want the PI delivered to Xen so that it can
-     * wake it up  (SN clear, NV = pi_wakeup_vector, vcpu on block list).
-     *
-     * If the VM is currently either preempted or offline (i.e., not running
-     * because of some reason other than blocking waiting for an interrupt),
-     * there's nothing Xen can do -- we want the interrupt pending bit set in
-     * the guest, but we don't want to bother Xen with an interrupt (SN clear).
-     *
-     * There's a brief window of time between vmx_intr_assist() and checking
-     * softirqs where if an interrupt comes in it may be lost; so we need Xen
-     * to get an interrupt and raise a softirq so that it will go through the
-     * vmx_intr_assist() path again (SN clear, NV = posted_interrupt).
-     *
-     * The way we implement this now is by looking at what needs to happen on
-     * the following runstate transitions:
-     *
-     * A: runnable -> running
-     *  - SN = 0
-     *  - NDST = v->processor
-     * B: running -> runnable
-     *  - SN = 1
-     * C: running -> blocked
-     *  - NV = pi_wakeup_vector
-     *  - Add vcpu to blocked list
-     * D: blocked -> runnable
-     *  - NV = posted_intr_vector
-     *  - Take vcpu off blocked list
-     *
-     * For transitions A and B, we add hooks into vmx_ctxt_switch_{from,to}
-     * paths.
-     *
-     * For transition C, we add a new arch hook, arch_vcpu_block(), which is
-     * called from vcpu_block() and vcpu_do_poll().
-     *
-     * For transition D, rather than add an extra arch hook on vcpu_wake, we
-     * add a hook on the vmentry path which checks to see if either of the two
-     * actions need to be taken.
-     *
-     * These hooks only need to be called when the domain in question actually
-     * has a physical device assigned to it, so we set and clear the callbacks
-     * as appropriate when device assignment changes.
-     */
-    void (*vcpu_block) (struct vcpu *);
-    void (*pi_switch_from) (struct vcpu *v);
-    void (*pi_switch_to) (struct vcpu *v);
-    void (*pi_do_resume) (struct vcpu *v);
  };

  struct pi_desc {