@@ -283,8 +283,12 @@ static int hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
     {
         int rc = iommu_update_ire_from_msi(&ch->msi, msg);
 
-        if ( rc )
+        if ( rc < 0 )
             return rc;
+        /*
+         * Always propagate writes, to avoid having to pass a flag for handling
+         * a forceful write in the resume from suspension case.
+         */
     }
 
     hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
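
Note the shape of the hpet_msi_write() hunk: only a negative return from
iommu_update_ire_from_msi() aborts, while both success cases, rc == 0 (IRTE
unchanged) and rc > 0 (freshly allocated IRTE), fall through to the
hpet_write32() calls. The HPET path thus covers the resume case by always
rewriting the comparator registers, instead of growing a force parameter the
way write_msi_msg() does further down.
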
@@ -422,7 +422,18 @@ static int cf_check vmx_pi_update_irte(const struct vcpu *v,
 
     ASSERT_PDEV_LIST_IS_READ_LOCKED(msi_desc->dev->domain);
 
-    return iommu_update_ire_from_msi(msi_desc, &msg);
+    rc = iommu_update_ire_from_msi(msi_desc, &msg);
+    if ( rc > 0 )
+    {
+        /*
+         * Callers of vmx_pi_update_irte() won't propagate the updated MSI
+         * fields to the hardware, so assert there are no changes.
+         */
+        ASSERT_UNREACHABLE();
+        rc = -EILSEQ;
+    }
+
+    return rc;
 
  unlock_out:
     spin_unlock_irq(&desc->lock);
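
As the comment says, no caller of vmx_pi_update_irte() writes the updated MSI
fields back to the device, so a positive return (message changed, must be
propagated) would be a logic error: ASSERT_UNREACHABLE() catches it in debug
builds, and the -EILSEQ fallback stops release builds from reporting success
while the hardware is left stale.
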
@@ -184,7 +184,8 @@ void msi_compose_msg(unsigned vector, const cpumask_t *cpu_mask, struct msi_msg
             MSI_DATA_VECTOR(vector);
 }
 
-static int write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+static int write_msi_msg(struct msi_desc *entry, struct msi_msg *msg,
+                         bool force)
 {
     entry->msg = *msg;
 
@@ -194,7 +195,7 @@ static int write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 
         ASSERT(msg != &entry->msg);
         rc = iommu_update_ire_from_msi(entry, msg);
-        if ( rc )
+        if ( rc < 0 || (rc == 0 && !force) )
             return rc;
     }
 
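
The reworked condition in write_msi_msg() enumerates the tristate together
with the new force flag:

    rc < 0             -> return rc    (IOMMU error)
    rc == 0 && !force  -> return 0     (hardware already holds the message)
    rc == 0 && force   -> fall through (forced rewrite, used on resume)
    rc > 0             -> fall through (new IRTE: message must be written)
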
@@ -259,7 +260,7 @@ void cf_check set_msi_affinity(struct irq_desc *desc, const cpumask_t *mask)
     msg.address_lo |= MSI_ADDR_DEST_ID(dest);
     msg.dest32 = dest;
 
-    write_msi_msg(msi_desc, &msg);
+    write_msi_msg(msi_desc, &msg, false);
 }
 
 void __msi_set_enable(pci_sbdf_t sbdf, int pos, int enable)
@@ -522,7 +523,7 @@ int __setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc,
     desc->msi_desc = msidesc;
     desc->handler = handler;
     msi_compose_msg(desc->arch.vector, desc->arch.cpu_mask, &msg);
-    ret = write_msi_msg(msidesc, &msg);
+    ret = write_msi_msg(msidesc, &msg, false);
     if ( unlikely(ret) )
     {
         desc->handler = &no_irq_type;
@@ -1403,7 +1404,7 @@ int pci_restore_msi_state(struct pci_dev *pdev)
 
         type = entry->msi_attrib.type;
         msg = entry->msg;
-        write_msi_msg(entry, &msg);
+        write_msi_msg(entry, &msg, true);
 
         for ( i = 0; ; )
         {
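
The three call sites spell out the policy: set_msi_affinity() and
__setup_msi_irq() pass force=false, since anything that needs propagating
already comes back as rc > 0, while pci_restore_msi_state() passes force=true
because on resume from suspend the device's MSI registers must be rewritten
even when the interrupt remapping entry, and hence the composed message, is
unchanged (rc == 0).
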
@@ -492,7 +492,7 @@ static int update_intremap_entry_from_msi_msg(
                get_ivrs_mappings(iommu->seg)[alias_id].intremap_table);
     }
 
-    return 0;
+    return fresh;
 }
 
 static struct amd_iommu *_find_iommu_for_device(int seg, int bdf)
@@ -546,7 +546,7 @@ int cf_check amd_iommu_msi_msg_update_ire(
     rc = update_intremap_entry_from_msi_msg(iommu, bdf, nr,
                                             &msi_desc->remap_index,
                                             msg, &data);
-    if ( !rc )
+    if ( rc > 0 )
     {
         for ( i = 1; i < nr; ++i )
             msi_desc[i].remap_index = msi_desc->remap_index + i;
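
On the AMD side, update_intremap_entry_from_msi_msg() now returns its fresh
tracking (1 when a new interrupt remapping table slot was allocated, 0 when an
existing entry was updated in place), and amd_iommu_msi_msg_update_ire() fans
the remap index out to the sibling msi_desc entries only for rc > 0; for
rc == 0 the caller skips the hardware write, so nothing else needs updating.
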
@@ -506,6 +506,7 @@ static int msi_msg_to_remap_entry(
     unsigned int index, i, nr = 1;
     unsigned long flags;
     const struct pi_desc *pi_desc = msi_desc->pi_desc;
+    bool alloc = false;
 
     if ( msi_desc->msi_attrib.type == PCI_CAP_ID_MSI )
         nr = msi_desc->msi.nvec;
@@ -529,6 +530,7 @@ static int msi_msg_to_remap_entry(
         index = alloc_remap_entry(iommu, nr);
         for ( i = 0; i < nr; ++i )
             msi_desc[i].remap_index = index + i;
+        alloc = true;
     }
     else
         index = msi_desc->remap_index;
@@ -601,7 +603,7 @@ static int msi_msg_to_remap_entry(
     unmap_vtd_domain_page(iremap_entries);
     spin_unlock_irqrestore(&iommu->intremap.lock, flags);
 
-    return 0;
+    return alloc;
 }
 
 int cf_check msi_msg_write_remap_rte(
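
The VT-d implementation mirrors this: alloc records whether
alloc_remap_entry() handed out a new index for this descriptor, and
msi_msg_to_remap_entry() returns it, so rc == 0 now means the existing remap
entry was updated in place and the address/data pair programmed into the
device is still valid.
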
@@ -435,6 +435,12 @@ extern struct page_list_head iommu_pt_cleanup_list;
 bool arch_iommu_use_permitted(const struct domain *d);
 
 #ifdef CONFIG_X86
+/*
+ * Return values:
+ *  - < 0 on error.
+ *  - 0 on success and no need to write msi_msg to the hardware.
+ *  - 1 on success and msi_msg must be propagated to the hardware.
+ */
 static inline int iommu_update_ire_from_msi(
     struct msi_desc *msi_desc, struct msi_msg *msg)
 {
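
With the contract spelled out next to the declaration, every caller follows
the same dispatch shape. A minimal sketch of that shape, where
write_to_device() and the force flag are illustrative stand-ins for the
call-site specifics rather than Xen functions:

    int rc = iommu_update_ire_from_msi(msi_desc, msg);

    if ( rc < 0 )
        return rc;             /* error: leave the hardware untouched */
    if ( rc > 0 || force )
        write_to_device(msg);  /* message changed, or rewrite forced */
    /* else: hardware already holds an equivalent message */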