@@ -101,8 +101,8 @@ int vmsi_deliver(
 
 void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *pirq_dpci)
 {
-    uint32_t flags = pirq_dpci->gmsi.gflags;
-    int vector = pirq_dpci->gmsi.gvec;
+    uint32_t flags = pirq_dpci->gmsi.legacy.gflags;
+    int vector = pirq_dpci->gmsi.legacy.gvec;
     uint8_t dest = (uint8_t)flags;
     uint8_t dest_mode = !!(flags & VMSI_DM_MASK);
     uint8_t delivery_mode = (flags & VMSI_DELIV_MASK)
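
For reference, vmsi_deliver_pirq() unpacks all of the guest MSI attributes from the single gflags word. Below is a minimal standalone sketch of that decode; the VMSI_* mask values are assumptions inferred from the GFLAGS_SHIFT_* constants visible in this patch, not quoted from irq.h.

/* Sketch only: the mask/shift values below are assumed, not copied
 * from the tree. GFLAGS_SHIFT_TRG_MODE 15 does appear in this patch. */
#include <stdint.h>
#include <stdio.h>

#define VMSI_DEST_ID_MASK        0x00ffU /* assumed: bits 0-7, APIC dest ID */
#define VMSI_DM_MASK             0x0200U /* assumed: destination mode bit */
#define VMSI_DELIV_MASK          0x7000U /* assumed: delivery mode field */
#define GFLAGS_SHIFT_DELIV_MODE  12      /* assumed to pair with VMSI_DELIV_MASK */
#define VMSI_TRIG_MODE           0x8000U /* consistent with GFLAGS_SHIFT_TRG_MODE 15 */

int main(void)
{
    uint32_t gflags = 0x82a1;                        /* example packed value */
    uint8_t dest = (uint8_t)gflags;                  /* same cast as in the hunk */
    uint8_t dest_mode = !!(gflags & VMSI_DM_MASK);
    uint8_t delivery_mode = (gflags & VMSI_DELIV_MASK)
                                >> GFLAGS_SHIFT_DELIV_MODE;
    uint8_t trig_mode = !!(gflags & VMSI_TRIG_MODE);

    printf("dest %#x dm %u deliv %u trig %u\n",
           (unsigned)dest, (unsigned)dest_mode,
           (unsigned)delivery_mode, (unsigned)trig_mode);
    return 0;
}
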
@@ -348,8 +348,8 @@ int pt_irq_create_bind(
         {
             pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI |
                                HVM_IRQ_DPCI_GUEST_MSI;
-            pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
-            pirq_dpci->gmsi.gflags = pt_irq_bind->u.msi.gflags;
+            pirq_dpci->gmsi.legacy.gvec = pt_irq_bind->u.msi.gvec;
+            pirq_dpci->gmsi.legacy.gflags = pt_irq_bind->u.msi.gflags;
             /*
              * 'pt_irq_create_bind' can be called after 'pt_irq_destroy_bind'.
              * The 'pirq_cleanup_check' which would free the structure is only
@@ -381,8 +381,8 @@ int pt_irq_create_bind(
             }
             if ( unlikely(rc) )
             {
-                pirq_dpci->gmsi.gflags = 0;
-                pirq_dpci->gmsi.gvec = 0;
+                pirq_dpci->gmsi.legacy.gflags = 0;
+                pirq_dpci->gmsi.legacy.gvec = 0;
                 pirq_dpci->dom = NULL;
                 pirq_dpci->flags = 0;
                 pirq_cleanup_check(info, d);
@@ -401,20 +401,20 @@ int pt_irq_create_bind(
             }
 
             /* If pirq is already mapped as vmsi, update guest data/addr. */
-            if ( pirq_dpci->gmsi.gvec != pt_irq_bind->u.msi.gvec ||
-                 pirq_dpci->gmsi.gflags != pt_irq_bind->u.msi.gflags )
+            if ( pirq_dpci->gmsi.legacy.gvec != pt_irq_bind->u.msi.gvec ||
+                 pirq_dpci->gmsi.legacy.gflags != pt_irq_bind->u.msi.gflags )
             {
                 /* Directly clear pending EOIs before enabling new MSI info. */
                 pirq_guest_eoi(info);
 
-                pirq_dpci->gmsi.gvec = pt_irq_bind->u.msi.gvec;
-                pirq_dpci->gmsi.gflags = pt_irq_bind->u.msi.gflags;
+                pirq_dpci->gmsi.legacy.gvec = pt_irq_bind->u.msi.gvec;
+                pirq_dpci->gmsi.legacy.gflags = pt_irq_bind->u.msi.gflags;
             }
         }
         /* Calculate dest_vcpu_id for MSI-type pirq migration. */
-        dest = pirq_dpci->gmsi.gflags & VMSI_DEST_ID_MASK;
-        dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
-        delivery_mode = (pirq_dpci->gmsi.gflags & VMSI_DELIV_MASK) >>
+        dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
+        dest_mode = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
+        delivery_mode = (pirq_dpci->gmsi.legacy.gflags & VMSI_DELIV_MASK) >>
                          GFLAGS_SHIFT_DELIV_MODE;
 
         dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
@@ -427,7 +427,7 @@ int pt_irq_create_bind(
         {
             if ( delivery_mode == dest_LowestPrio )
                 vcpu = vector_hashing_dest(d, dest, dest_mode,
-                                           pirq_dpci->gmsi.gvec);
+                                           pirq_dpci->gmsi.legacy.gvec);
             if ( vcpu )
                 pirq_dpci->gmsi.posted = true;
         }
@@ -437,7 +437,7 @@ int pt_irq_create_bind(
         /* Use interrupt posting if it is supported. */
         if ( iommu_intpost )
             pi_update_irte(vcpu ? &vcpu->arch.hvm_vmx.pi_desc : NULL,
-                           info, pirq_dpci->gmsi.gvec);
+                           info, pirq_dpci->gmsi.legacy.gvec);
 
         break;
     }
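
When the guest asks for lowest-priority delivery, VT-d posted interrupts still need a single target vCPU, so vector_hashing_dest() picks one deterministically from the guest vector. A stripped-down illustration of just the hashing step follows; the function name is hypothetical, and the in-tree helper additionally builds the candidate list from dest/dest_mode, which is omitted here.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: choose one of 'nr' eligible vCPU ids for 'gvec'.
 * gvec % nr spreads guest vectors evenly across the candidates. */
static unsigned int pick_posted_dest(const unsigned int *vcpu_ids,
                                     size_t nr, uint8_t gvec)
{
    return vcpu_ids[gvec % nr];
}

int main(void)
{
    unsigned int ids[] = { 0, 2, 3 };   /* example eligible vCPUs */

    /* 0x61 % 3 == 1, so vector 0x61 lands on vCPU 2 here. */
    return (int)pick_posted_dest(ids, sizeof(ids) / sizeof(ids[0]), 0x61);
}
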
@@ -817,10 +817,10 @@ static int _hvm_dpci_msi_eoi(struct domain *d,
     int vector = (long)arg;
 
     if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
-         (pirq_dpci->gmsi.gvec == vector) )
+         (pirq_dpci->gmsi.legacy.gvec == vector) )
     {
-        int dest = pirq_dpci->gmsi.gflags & VMSI_DEST_ID_MASK;
-        int dest_mode = !!(pirq_dpci->gmsi.gflags & VMSI_DM_MASK);
+        int dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
+        int dest_mode = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
 
         if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest,
                                dest_mode) )
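
_hvm_dpci_msi_eoi() forwards the EOI only on the vCPU whose LAPIC the MSI was addressed to. A simplified, hypothetical version of the physical-mode check follows; vlapic_match_dest() also handles logical destination mode and shorthands, which this sketch deliberately omits.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical simplification: physical destination mode only.
 * 0xff is the conventional xAPIC broadcast destination ID. */
static bool msi_dest_match_phys(uint8_t lapic_id, uint8_t dest)
{
    return dest == 0xff || dest == lapic_id;
}

int main(void)
{
    /* Example: MSI addressed to APIC ID 2, EOI arriving on that vCPU. */
    return msi_dest_match_phys(2, 2) ? 0 : 1;
}
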
@@ -62,8 +62,12 @@ struct dev_intx_gsi_link {
 #define GFLAGS_SHIFT_TRG_MODE 15
 
 struct hvm_gmsi_info {
-    uint32_t gvec;
-    uint32_t gflags;
+    union {
+        struct {
+            uint32_t gvec;
+            uint32_t gflags;
+        } legacy;
+    };
     int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
     bool posted; /* directly deliver to guest via VT-d PI? */
 };
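
The single-member union looks odd in isolation; presumably it exists so a later patch can add an alternative MSI format alongside 'legacy' without having to touch all of these call sites again. A hypothetical sketch of such an extension, where the 'intremap' member and its fields are purely illustrative and not part of this patch:

#include <stdbool.h>
#include <stdint.h>

struct hvm_gmsi_info {
    union {
        struct {
            uint32_t gvec;
            uint32_t gflags;
        } legacy;                  /* non-remapped MSI, as in this patch */
        struct {                   /* hypothetical follow-up member */
            uint32_t source_id;    /* illustrative field */
            uint32_t data;         /* illustrative field */
            uint64_t addr;         /* illustrative field */
        } intremap;
    };
    int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
    bool posted;      /* directly deliver to guest via VT-d PI? */
};
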