@@ -836,6 +836,12 @@ void __init x2apic_bsp_setup(void)
if ( !cpu_has_x2apic )
return;
+ if ( unlikely(!cpu_has_cx16) )
+ {
+ printk("x2APIC: CPU doesn't support CMPXCHG16B, disabling\n");
+ return;
+ }
+
if ( !opt_x2apic )
{
if ( !x2apic_enabled )
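
For background, cpu_has_cx16 reflects the architectural CMPXCHG16B
feature flag, CPUID.01H:ECX bit 13. A minimal user-space sketch of the
same probe (the names below are illustrative, not Xen's plumbing):

    #include <cpuid.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* CPUID.01H:ECX.CMPXCHG16B is bit 13. */
    static bool has_cmpxchg16b(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
            return false;

        return ecx & (1u << 13);
    }

    int main(void)
    {
        printf("CMPXCHG16B %ssupported\n", has_cmpxchg16b() ? "" : "not ");
        return 0;
    }
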
@@ -173,47 +173,26 @@ bool __init cf_check intel_iommu_supports_eim(void)
* Assume iremap_lock has been acquired. It is to make sure software will not
* change the same IRTE behind us. With this assumption, if only high qword or
* low qword in IRTE is to be updated, this function's atomic variant can
- * present an atomic update to VT-d hardware even when cmpxchg16b
- * instruction is not supported.
+ * present an atomic update to VT-d hardware.
*/
static void update_irte(struct vtd_iommu *iommu, struct iremap_entry *entry,
const struct iremap_entry *new_ire, bool atomic)
{
- ASSERT(spin_is_locked(&iommu->intremap.lock));
-
- if ( cpu_has_cx16 )
- {
- __uint128_t ret;
- struct iremap_entry old_ire;
+ __uint128_t ret;
+ struct iremap_entry old_ire;
- old_ire = *entry;
- ret = cmpxchg16b(entry, &old_ire, new_ire);
+ ASSERT(spin_is_locked(&iommu->intremap.lock));
+
+ old_ire = *entry;
+ ret = cmpxchg16b(entry, &old_ire, new_ire);
- /*
- * In the above, we use cmpxchg16 to atomically update the 128-bit
- * IRTE, and the hardware cannot update the IRTE behind us, so
- * the return value of cmpxchg16 should be the same as old_ire.
- * This ASSERT validate it.
- */
- ASSERT(ret == old_ire.val);
- }
- else
- {
- /*
- * VT-d hardware doesn't update IRTEs behind us, nor the software
- * since we hold iremap_lock. If the caller wants VT-d hardware to
- * always see a consistent entry, but we can't meet it, a bug will
- * be raised.
- */
- if ( entry->lo == new_ire->lo )
- write_atomic(&entry->hi, new_ire->hi);
- else if ( entry->hi == new_ire->hi )
- write_atomic(&entry->lo, new_ire->lo);
- else if ( !atomic )
- *entry = *new_ire;
- else
- BUG();
- }
+    /*
+     * In the above, we use cmpxchg16b to atomically update the 128-bit
+     * IRTE, and the hardware cannot update the IRTE behind us, so
+     * the return value of cmpxchg16b should be the same as old_ire.
+     * This ASSERT validates it.
+     */
+    ASSERT(ret == old_ire.val);
}
/* Mark specified intr remap entry as free */
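
With the fallback gone, update_irte() relies solely on cmpxchg16b()'s
return-the-prior-contents semantics. A standalone sketch of such a
16-byte compare-and-exchange, assuming GCC/Clang built with -mcx16
(this stand-in is not Xen's implementation, which uses inline
assembly):

    #include <stdbool.h>

    /*
     * Returns the value found at *ptr; the exchange took place iff
     * that equals 'old'.  With -mcx16 the builtin can be lowered to
     * LOCK CMPXCHG16B (possibly via libatomic); *ptr must be 16-byte
     * aligned, as the instruction requires.
     */
    static unsigned __int128 cmpxchg16b_sketch(unsigned __int128 *ptr,
                                               unsigned __int128 old,
                                               unsigned __int128 new_val)
    {
        __atomic_compare_exchange_n(ptr, &old, new_val, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return old; /* on failure, updated to *ptr's prior contents */
    }
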
@@ -395,7 +374,6 @@ static int ioapic_rte_to_remap_entry(struct vtd_iommu *iommu,
/* Indicate remap format. */
remap_rte->format = 1;
- /* If cmpxchg16b is not available the caller must mask the IO-APIC pin. */
update_irte(iommu, iremap_entry, &new_ire, !init && !masked);
iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
iommu_flush_iec_index(iommu, 0, index);
@@ -437,21 +415,6 @@ void cf_check io_apic_write_remap_rte(
bool masked = true;
int rc;
- if ( !cpu_has_cx16 )
- {
- /*
- * Cannot atomically update the IRTE entry: mask the IO-APIC pin to
- * avoid interrupts seeing an inconsistent IRTE entry.
- */
- old_rte = __ioapic_read_entry(apic, pin, true);
- if ( !old_rte.mask )
- {
- masked = false;
- old_rte.mask = 1;
- __ioapic_write_entry(apic, pin, true, old_rte);
- }
- }
-
/* Not the initializer, for old gcc to cope. */
new_rte.raw = rte;
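
The dropped masking dance guarded against a real hazard: updating a
128-bit entry as two 64-bit stores leaves a window in which hardware
can fetch a mixed old/new entry. A contrived sketch of that torn-write
window (the types are illustrative):

    #include <stdint.h>

    struct entry128 {          /* stand-in for a 128-bit remap entry */
        uint64_t lo, hi;
    };

    static void torn_update(volatile struct entry128 *e,
                            const struct entry128 *new_e)
    {
        e->lo = new_e->lo;
        /*
         * Window: hardware fetching the entry here sees the new 'lo'
         * with the stale 'hi', which is why the IO-APIC pin had to be
         * masked when CMPXCHG16B was unavailable.
         */
        e->hi = new_e->hi;
    }
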
@@ -1482,7 +1482,7 @@ int domain_context_mapping_one(
{
struct domain_iommu *hd = dom_iommu(domain);
struct context_entry *context, *context_entries, lctxt;
- __uint128_t old;
+ __uint128_t res, old;
uint64_t maddr;
uint16_t seg = iommu->drhd->segment, prev_did = 0;
struct domain *prev_dom = NULL;
@@ -1580,55 +1580,23 @@ int domain_context_mapping_one(
ASSERT(!context_fault_disable(lctxt));
}
- if ( cpu_has_cx16 )
- {
- __uint128_t res = cmpxchg16b(context, &old, &lctxt.full);
+ res = cmpxchg16b(context, &old, &lctxt.full);
- /*
- * Hardware does not update the context entry behind our backs,
- * so the return value should match "old".
- */
- if ( res != old )
- {
- if ( pdev )
- check_cleanup_domid_map(domain, pdev, iommu);
- printk(XENLOG_ERR
- "%pp: unexpected context entry %016lx_%016lx (expected %016lx_%016lx)\n",
- &PCI_SBDF(seg, bus, devfn),
- (uint64_t)(res >> 64), (uint64_t)res,
- (uint64_t)(old >> 64), (uint64_t)old);
- rc = -EILSEQ;
- goto unlock;
- }
- }
- else if ( !prev_dom || !(mode & MAP_WITH_RMRR) )
- {
- context_clear_present(*context);
- iommu_sync_cache(context, sizeof(*context));
-
- write_atomic(&context->hi, lctxt.hi);
- /* No barrier should be needed between these two. */
- write_atomic(&context->lo, lctxt.lo);
- }
- else /* Best effort, updating DID last. */
+ /*
+ * Hardware does not update the context entry behind our backs,
+ * so the return value should match "old".
+ */
+ if ( res != old )
{
- /*
- * By non-atomically updating the context entry's DID field last,
- * during a short window in time TLB entries with the old domain ID
- * but the new page tables may be inserted. This could affect I/O
- * of other devices using this same (old) domain ID. Such updating
- * therefore is not a problem if this was the only device associated
- * with the old domain ID. Diverting I/O of any of a dying domain's
- * devices to the quarantine page tables is intended anyway.
- */
- if ( !(mode & (MAP_OWNER_DYING | MAP_SINGLE_DEVICE)) )
- printk(XENLOG_WARNING VTDPREFIX
- " %pp: reassignment may cause %pd data corruption\n",
- &PCI_SBDF(seg, bus, devfn), prev_dom);
-
- write_atomic(&context->lo, lctxt.lo);
- /* No barrier should be needed between these two. */
- write_atomic(&context->hi, lctxt.hi);
+ if ( pdev )
+ check_cleanup_domid_map(domain, pdev, iommu);
+ printk(XENLOG_ERR
+ "%pp: unexpected context entry %016lx_%016lx (expected %016lx_%016lx)\n",
+ &PCI_SBDF(seg, bus, devfn),
+ (uint64_t)(res >> 64), (uint64_t)res,
+ (uint64_t)(old >> 64), (uint64_t)old);
+ rc = -EILSEQ;
+ goto unlock;
}
iommu_sync_cache(context, sizeof(struct context_entry));
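
On mismatch, the printk above splits each __uint128_t into two 64-bit
halves because printf-style formats have no 128-bit conversion. A
minimal sketch of the same decomposition (print_u128 is a hypothetical
helper):

    #include <inttypes.h>
    #include <stdio.h>

    static void print_u128(const char *tag, unsigned __int128 v)
    {
        /* High and low halves, matching the %016lx_%016lx layout. */
        printf("%s: %016" PRIx64 "_%016" PRIx64 "\n", tag,
               (uint64_t)(v >> 64), (uint64_t)v);
    }
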
@@ -2630,6 +2598,15 @@ static int __init cf_check vtd_setup(void)
int ret;
bool reg_inval_supported = true;
+ if ( unlikely(!cpu_has_cx16) )
+ {
+ printk(XENLOG_ERR VTDPREFIX
+ "IOMMU: CPU doesn't support CMPXCHG16B, disabling\n");
+
+ ret = -ENOSYS;
+ goto error;
+ }
+
if ( list_empty(&acpi_drhd_units) )
{
ret = -ENODEV;
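
The new check fails the whole VT-d setup before any hardware is
touched. A compilable sketch of that fail-fast shape (both probes
below are hypothetical stand-ins, not Xen interfaces):

    #include <errno.h>
    #include <stdbool.h>

    static bool cx16_available(void)  { return true; } /* stand-in */
    static bool have_drhd_units(void) { return true; } /* stand-in */

    static int vtd_setup_sketch(void)
    {
        /*
         * Everything below depends on 16-byte atomic updates of IRTEs
         * and context entries, so refuse to set up the IOMMU at all
         * without CMPXCHG16B.
         */
        if ( !cx16_available() )
            return -ENOSYS;

        if ( !have_drhd_units() )
            return -ENODEV;

        /* ... per-IOMMU capability checks would follow ... */
        return 0;
    }
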
@@ -2692,12 +2669,7 @@ static int __init cf_check vtd_setup(void)
iommu_intremap = iommu_intremap_off;
#ifndef iommu_intpost
- /*
- * We cannot use posted interrupt if X86_FEATURE_CX16 is
- * not supported, since we count on this feature to
- * atomically update 16-byte IRTE in posted format.
- */
- if ( !cap_intr_post(iommu->cap) || !iommu_intremap || !cpu_has_cx16 )
+ if ( !cap_intr_post(iommu->cap) || !iommu_intremap )
iommu_intpost = false;
#endif
No hardware has VT-d support while lacking cx16 support; disable the
IOMMU in this case to avoid potentially buggy code paths. Now that the
IOMMU is only enabled when cx16 is supported, drop the dead code that
handled the absence of cx16.

Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Teddy Astie <teddy.astie@vates.tech>
---
 xen/arch/x86/apic.c                    |  6 ++
 xen/drivers/passthrough/vtd/intremap.c | 65 +++++----------------
 xen/drivers/passthrough/vtd/iommu.c    | 80 +++++++++-----------------
 3 files changed, 46 insertions(+), 105 deletions(-)