--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -499,8 +499,6 @@ void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector)
if ( vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
vioapic_update_EOI(vector);

- hvm_dpci_msi_eoi(vector);
-
spin_lock_irqsave(&vlapic->callback_lock, flags);
callback = vlapic->callbacks[index].callback;
vlapic->callbacks[index].callback = NULL;
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -44,11 +44,9 @@
#include <asm/event.h>
#include <asm/io_apic.h>

-static void vmsi_inj_irq(
- struct vlapic *target,
- uint8_t vector,
- uint8_t trig_mode,
- uint8_t delivery_mode)
+static void vmsi_inj_irq(struct vlapic *target, uint8_t vector,
+ uint8_t trig_mode, uint8_t delivery_mode,
+ vlapic_eoi_callback_t *callback, void *data)
{
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "vmsi_inj_irq: vec %02x trig %d dm %d\n",
vector, trig_mode, delivery_mode);
@@ -57,17 +55,17 @@ static void vmsi_inj_irq(
{
case dest_Fixed:
case dest_LowestPrio:
- vlapic_set_irq(target, vector, trig_mode);
+ vlapic_set_irq_callback(target, vector, trig_mode, callback, data);
break;
default:
BUG();
}
}

-int vmsi_deliver(
- struct domain *d, int vector,
- uint8_t dest, uint8_t dest_mode,
- uint8_t delivery_mode, uint8_t trig_mode)
+static int vmsi_deliver_callback(struct domain *d, int vector, uint8_t dest,
+ uint8_t dest_mode, uint8_t delivery_mode,
+ uint8_t trig_mode,
+ vlapic_eoi_callback_t *callback, void *data)
{
struct vlapic *target;
struct vcpu *v;
@@ -78,7 +76,8 @@ int vmsi_deliver(
target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode);
if ( target != NULL )
{
- vmsi_inj_irq(target, vector, trig_mode, delivery_mode);
+ vmsi_inj_irq(target, vector, trig_mode, delivery_mode, callback,
+ data);
break;
}
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "null MSI round robin: vector=%02x\n",
@@ -89,8 +88,8 @@ int vmsi_deliver(
for_each_vcpu ( d, v )
if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
0, dest, dest_mode) )
- vmsi_inj_irq(vcpu_vlapic(v), vector,
- trig_mode, delivery_mode);
+ vmsi_inj_irq(vcpu_vlapic(v), vector, trig_mode, delivery_mode,
+ callback, data);
break;

default:
@@ -103,6 +102,13 @@ int vmsi_deliver(
return 0;
}

+int vmsi_deliver(struct domain *d, int vector, uint8_t dest, uint8_t dest_mode,
+ uint8_t delivery_mode, uint8_t trig_mode)
+{
+ return vmsi_deliver_callback(d, vector, dest, dest_mode, delivery_mode,
+ trig_mode, NULL, NULL);
+}
+
void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
{
uint32_t flags = pirq_dpci->gmsi.gflags;
@@ -119,7 +125,8 @@ void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
ASSERT(pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI);

- vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
+ vmsi_deliver_callback(d, vector, dest, dest_mode, delivery_mode, trig_mode,
+ hvm_dpci_msi_eoi, d);
}

/* Return value, -1 : multi-dests, non-negative value: dest_vcpu_id */
--- a/xen/drivers/passthrough/x86/hvm.c
+++ b/xen/drivers/passthrough/x86/hvm.c
@@ -796,13 +796,9 @@ static int _hvm_dpci_msi_eoi(struct domain *d,
return 0;
}

-void hvm_dpci_msi_eoi(unsigned int vector)
+void hvm_dpci_msi_eoi(unsigned int vector, void *data)
{
- struct domain *d = current->domain;
-
- if ( !is_iommu_enabled(d) ||
- (!hvm_domain_irq(d)->dpci && !is_hardware_domain(d)) )
- return;
+ struct domain *d = data;

spin_lock(&d->event_lock);
pt_pirq_iterate(d, _hvm_dpci_msi_eoi, (void *)(long)vector);
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -142,7 +142,7 @@ struct hvm_hw_stdvga {
void stdvga_init(struct domain *d);
void stdvga_deinit(struct domain *d);

-extern void hvm_dpci_msi_eoi(unsigned int vector);
+void hvm_dpci_msi_eoi(unsigned int vector, void *data);

/* Decode a PCI port IO access into a bus/slot/func/reg. */
unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr,

Remove the unconditional call to hvm_dpci_msi_eoi in vlapic_handle_EOI
and instead use the newly introduced EOI callback mechanism in order to
register a callback for MSI vectors injected from passed-through
devices.

This avoids having multiple callback functions open-coded in
vlapic_handle_EOI, as there is now a generic framework for registering
such callbacks. It also avoids an unconditional call to
hvm_dpci_msi_eoi for each EOI processed by the local APIC.

Note that the callback is now only registered (and thus executed) when
there's an MSI interrupt originating from a PCI passthrough device
being injected into the guest, so the check in hvm_dpci_msi_eoi can be
removed: it's already done by hvm_dirq_assist, which is the only caller
of vmsi_deliver_pirq.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v2:
 - Expand commit message.
 - Pass the domain as the callback data.
 - Remove the check in hvm_dpci_msi_eoi.
---
 xen/arch/x86/hvm/vlapic.c         |  2 --
 xen/arch/x86/hvm/vmsi.c           | 35 ++++++++++++++++++-------------
 xen/drivers/passthrough/x86/hvm.c |  8 ++-----
 xen/include/asm-x86/hvm/io.h      |  2 +-
 4 files changed, 24 insertions(+), 23 deletions(-)
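
For reviewers, here is a minimal, self-contained sketch of the resulting
flow. It is illustrative only, not the Xen code: the demo_* names and the
flat per-vector table are invented for the example, registration really
happens through vlapic_set_irq_callback() when vmsi_inj_irq() injects the
vector, and the real implementation protects the slot with
vlapic->callback_lock (visible in the first hunk).

/*
 * Minimal model of the EOI callback flow; locking omitted for brevity.
 */
#include <stddef.h>
#include <stdio.h>

typedef void vlapic_eoi_callback_t(unsigned int vector, void *data);

struct demo_vlapic {
    /* One callback slot per vector, as with vlapic->callbacks[]. */
    vlapic_eoi_callback_t *callback[256];
    void *data[256];
};

/*
 * Registration side: in the patch this happens when vmsi_inj_irq()
 * injects the vector via vlapic_set_irq_callback().
 */
static void demo_set_irq_callback(struct demo_vlapic *vlapic,
                                  unsigned int vector,
                                  vlapic_eoi_callback_t *cb, void *data)
{
    vlapic->callback[vector] = cb;
    vlapic->data[vector] = data;
}

/*
 * EOI side: vlapic_handle_EOI() consumes the slot and runs whatever was
 * registered, instead of unconditionally calling hvm_dpci_msi_eoi() for
 * every EOI.
 */
static void demo_handle_EOI(struct demo_vlapic *vlapic, unsigned int vector)
{
    vlapic_eoi_callback_t *cb = vlapic->callback[vector];
    void *data = vlapic->data[vector];

    vlapic->callback[vector] = NULL; /* one-shot: re-armed on next injection */
    if ( cb )
        cb(vector, data);
}

/* Stand-in for hvm_dpci_msi_eoi(): receives the domain as data. */
static void demo_msi_eoi(unsigned int vector, void *data)
{
    printf("EOI for vector %#x, domain %p\n", vector, data);
}

int main(void)
{
    struct demo_vlapic vlapic = { { NULL }, { NULL } };
    int fake_domain = 0; /* placeholder for a struct domain */

    demo_set_irq_callback(&vlapic, 0x50, demo_msi_eoi, &fake_domain);
    demo_handle_EOI(&vlapic, 0x50); /* callback registered: runs demo_msi_eoi */
    demo_handle_EOI(&vlapic, 0x51); /* nothing registered: nothing to do */
    return 0;
}

The shape worth noting is that the callback is armed per injection with
the domain as its data argument (see the vmsi_deliver_pirq hunk), so an
EOI for a vector with no passthrough MSI pending does no extra work.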