@@ -597,31 +597,34 @@ static int msi_msg_to_remap_entry(
memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));
- /* Set interrupt remapping table entry */
- new_ire.remap.fpd = 0;
- new_ire.remap.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
- new_ire.remap.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
- new_ire.remap.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
- /* Hardware require RH = 1 for LPR delivery mode */
- new_ire.remap.rh = (new_ire.remap.dlm == dest_LowestPrio);
- new_ire.remap.avail = 0;
- new_ire.remap.res_1 = 0;
- new_ire.remap.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
- MSI_DATA_VECTOR_MASK;
- new_ire.remap.res_2 = 0;
- if ( x2apic_enabled )
- new_ire.remap.dst = msg->dest32;
- else
- new_ire.remap.dst = ((msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT)
- & 0xff) << 8;
-
if ( pdev )
set_msi_source_id(pdev, &new_ire);
else
set_hpet_source_id(msi_desc->hpet_id, &new_ire);
- new_ire.remap.res_3 = 0;
- new_ire.remap.res_4 = 0;
- new_ire.remap.p = 1; /* finally, set present bit */
+
+ if ( !new_ire.remap.p || !new_ire.remap.im )
+ {
+ /* Set interrupt remapping table entry */
+ new_ire.remap.fpd = 0;
+ new_ire.remap.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
+ new_ire.remap.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
+ new_ire.remap.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
+ /* Hardware require RH = 1 for LPR delivery mode */
+ new_ire.remap.rh = (new_ire.remap.dlm == dest_LowestPrio);
+ new_ire.remap.avail = 0;
+ new_ire.remap.res_1 = 0;
+ new_ire.remap.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
+ MSI_DATA_VECTOR_MASK;
+ new_ire.remap.res_2 = 0;
+ if ( x2apic_enabled )
+ new_ire.remap.dst = msg->dest32;
+ else
+ new_ire.remap.dst = ((msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT)
+ & 0xff) << 8;
+ new_ire.remap.res_3 = 0;
+ new_ire.remap.res_4 = 0;
+ new_ire.remap.p = 1; /* finally, set present bit */
+ }
/* now construct new MSI/MSI-X rte entry */
remap_rte = (struct msi_msg_remap_entry *)msg;
@@ -637,9 +640,12 @@ static int msi_msg_to_remap_entry(
remap_rte->address_hi = 0;
remap_rte->data = index - i;
- memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
- iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
- iommu_flush_iec_index(iommu, 0, index);
+ if ( iremap_entry->val != new_ire.val )
+ {
+ memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
+ iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
+ iommu_flush_iec_index(iommu, 0, index);
+ }
unmap_vtd_domain_page(iremap_entries);
spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
We don't set the affinity for posted-format IRTEs, since the destination of these interrupts is a vCPU, and vCPU affinity is set during vCPU scheduling. Signed-off-by: Feng Wu <feng.wu@intel.com> --- v7: - Compare all the fields in the IRTE to determine whether we can suppress the update v6: - Make pi_can_suppress_irte_update() a check-only function - Introduce another function pi_get_new_irte() to update the 'new_ire' if needed v5: - Only suppress affinity-related IRTE updates for PI v4: - Keep the construction of new_ire and only modify the hardware IRTE when it is not in posted mode. xen/drivers/passthrough/vtd/intremap.c | 54 +++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 24 deletions(-)