@@ -167,15 +167,14 @@ int amd_iommu_set_root_page_table(struct amd_iommu_dte *dte,
 {
     bool valid = flags & SET_ROOT_VALID;
 
-    if ( dte->v && dte->tv &&
-         (cpu_has_cx16 || (flags & SET_ROOT_WITH_UNITY_MAP)) )
+    if ( dte->v && dte->tv )
     {
         union {
             struct amd_iommu_dte dte;
             uint64_t raw64[4];
             __uint128_t raw128[2];
         } ldte = { .dte = *dte };
-        __uint128_t old = ldte.raw128[0];
+        __uint128_t res, old = ldte.raw128[0];
         int ret = 0;
 
         ldte.dte.domain_id = domain_id;
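
The hunk above relies on a union giving raw views of the same bytes, so
the first 16 bytes of the live entry can be swapped with one cmpxchg16b.
A minimal sketch of that type-punning pattern (the structured field view
is omitted and the layout is illustrative, not the real amd_iommu_dte):

    #include <stdint.h>

    union dte_view {
        uint64_t raw64[4];             /* 32-byte DTE as 64-bit words */
        unsigned __int128 raw128[2];   /* raw128[0] is what gets CASed */
    };

A local copy ("ldte") is edited field by field; only then is raw128[0]
of the live entry replaced, so the hardware never observes a partially
updated first half.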
@@ -185,33 +184,20 @@ int amd_iommu_set_root_page_table(struct amd_iommu_dte *dte,
         ldte.dte.paging_mode = paging_mode;
         ldte.dte.v = valid;
 
-        if ( cpu_has_cx16 )
-        {
-            __uint128_t res = cmpxchg16b(dte, &old, &ldte.raw128[0]);
+        res = cmpxchg16b(dte, &old, &ldte.raw128[0]);
 
-            /*
-             * Hardware does not update the DTE behind our backs, so the
-             * return value should match "old".
-             */
-            if ( res != old )
-            {
-                printk(XENLOG_ERR
-                       "Dom%d: unexpected DTE %016lx_%016lx (expected %016lx_%016lx)\n",
-                       domain_id,
-                       (uint64_t)(res >> 64), (uint64_t)res,
-                       (uint64_t)(old >> 64), (uint64_t)old);
-                ret = -EILSEQ;
-            }
-        }
-        else /* Best effort, updating domain_id last. */
+        /*
+         * Hardware does not update the DTE behind our backs, so the
+         * return value should match "old".
+         */
+        if ( res != old )
         {
-            uint64_t *ptr = (void *)dte;
-
-            write_atomic(ptr + 0, ldte.raw64[0]);
-            /* No barrier should be needed between these two. */
-            write_atomic(ptr + 1, ldte.raw64[1]);
-
-            ret = 1;
+            printk(XENLOG_ERR
+                   "Dom%d: unexpected DTE %016lx_%016lx (expected %016lx_%016lx)\n",
+                   domain_id,
+                   (uint64_t)(res >> 64), (uint64_t)res,
+                   (uint64_t)(old >> 64), (uint64_t)old);
+            ret = -EILSEQ;
         }
 
         return ret;
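
Both hunks reduce to one pattern: snapshot the 16-byte entry, build the
replacement locally, publish it with a single cmpxchg16b, and treat any
mismatch between the displaced value and the snapshot as corruption. A
compilable sketch of that publish-and-verify step, using the GCC/Clang
__atomic builtins in place of Xen's cmpxchg16b() helper (names are
mine; build with -mcx16, linking -latomic where needed):

    #include <stdint.h>
    #include <stdio.h>

    typedef unsigned __int128 u128;

    /* Returns 0 on success, -1 if the slot changed behind our back. */
    static int publish16(u128 *slot, u128 new_val)
    {
        u128 old = *slot;   /* snapshot; only software writes the entry */
        u128 seen = old;

        /* One atomic 16-byte compare-and-swap (cmpxchg16b on x86-64). */
        __atomic_compare_exchange_n(slot, &seen, new_val, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

        if ( seen != old )  /* mirrors the "res != old" check above */
        {
            fprintf(stderr, "unexpected entry %016llx_%016llx\n",
                    (unsigned long long)(seen >> 64),
                    (unsigned long long)seen);
            return -1;      /* the Xen code returns -EILSEQ here */
        }

        return 0;
    }

    int main(void)
    {
        static u128 slot;   /* 16-byte aligned stand-in for a DTE */

        return publish16(&slot, ((u128)0xdead << 64) | 0xbeef) ? 1 : 0;
    }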
@@ -1485,7 +1485,7 @@ int domain_context_mapping_one(
 {
     struct domain_iommu *hd = dom_iommu(domain);
     struct context_entry *context, *context_entries, lctxt;
-    __uint128_t old;
+    __uint128_t res, old;
     uint64_t maddr;
     uint16_t seg = iommu->drhd->segment, prev_did = 0;
     struct domain *prev_dom = NULL;
@@ -1583,55 +1583,23 @@ int domain_context_mapping_one(
         ASSERT(!context_fault_disable(lctxt));
     }
 
-    if ( cpu_has_cx16 )
-    {
-        __uint128_t res = cmpxchg16b(context, &old, &lctxt.full);
-
-        /*
-         * Hardware does not update the context entry behind our backs,
-         * so the return value should match "old".
-         */
-        if ( res != old )
-        {
-            if ( pdev )
-                check_cleanup_domid_map(domain, pdev, iommu);
-            printk(XENLOG_ERR
-                   "%pp: unexpected context entry %016lx_%016lx (expected %016lx_%016lx)\n",
-                   &PCI_SBDF(seg, bus, devfn),
-                   (uint64_t)(res >> 64), (uint64_t)res,
-                   (uint64_t)(old >> 64), (uint64_t)old);
-            rc = -EILSEQ;
-            goto unlock;
-        }
-    }
-    else if ( !prev_dom || !(mode & MAP_WITH_RMRR) )
-    {
-        context_clear_present(*context);
-        iommu_sync_cache(context, sizeof(*context));
+    res = cmpxchg16b(context, &old, &lctxt.full);
 
-        write_atomic(&context->hi, lctxt.hi);
-        /* No barrier should be needed between these two. */
-        write_atomic(&context->lo, lctxt.lo);
-    }
-    else /* Best effort, updating DID last. */
+    /*
+     * Hardware does not update the context entry behind our backs,
+     * so the return value should match "old".
+     */
+    if ( res != old )
     {
-        /*
-         * By non-atomically updating the context entry's DID field last,
-         * during a short window in time TLB entries with the old domain ID
-         * but the new page tables may be inserted. This could affect I/O
-         * of other devices using this same (old) domain ID. Such updating
-         * therefore is not a problem if this was the only device associated
-         * with the old domain ID. Diverting I/O of any of a dying domain's
-         * devices to the quarantine page tables is intended anyway.
-         */
-        if ( !(mode & (MAP_OWNER_DYING | MAP_SINGLE_DEVICE)) )
-            printk(XENLOG_WARNING VTDPREFIX
-                   " %pp: reassignment may cause %pd data corruption\n",
-                   &PCI_SBDF(seg, bus, devfn), prev_dom);
-
-        write_atomic(&context->lo, lctxt.lo);
-        /* No barrier should be needed between these two. */
-        write_atomic(&context->hi, lctxt.hi);
+        if ( pdev )
+            check_cleanup_domid_map(domain, pdev, iommu);
+        printk(XENLOG_ERR
+               "%pp: unexpected context entry %016lx_%016lx (expected %016lx_%016lx)\n",
+               &PCI_SBDF(seg, bus, devfn),
+               (uint64_t)(res >> 64), (uint64_t)res,
+               (uint64_t)(old >> 64), (uint64_t)old);
+        rc = -EILSEQ;
+        goto unlock;
     }
 
     iommu_sync_cache(context, sizeof(struct context_entry));
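
The two deleted fallbacks existed only because, without CMPXCHG16B, the
128-bit context entry had to be written as two 64-bit stores, forcing a
choice of order: clear the present bit and write hi before lo when that
was safe, or write lo first so the DID lands last, each order leaving a
different transient-inconsistency window (hence the deleted warning
about possible data corruption). A single 16-byte swap makes both
halves visible at once, so the ordering rules disappear. A sketch of
the two views involved (the field layout is illustrative, not the exact
VT-d context entry):

    #include <stdint.h>

    struct context_entry_sketch {
        union {
            struct {
                uint64_t lo;          /* present bit, page-table root */
                uint64_t hi;          /* address width, domain ID */
            };
            unsigned __int128 full;   /* the view cmpxchg16b swaps whole */
        };
    };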
@@ -2702,12 +2670,7 @@ static int __init cf_check vtd_setup(void)
             iommu_intremap = iommu_intremap_off;
 
 #ifndef iommu_intpost
-        /*
-         * We cannot use posted interrupt if X86_FEATURE_CX16 is
-         * not supported, since we count on this feature to
-         * atomically update 16-byte IRTE in posted format.
-         */
-        if ( !cap_intr_post(iommu->cap) || !iommu_intremap || !cpu_has_cx16 )
+        if ( !cap_intr_post(iommu->cap) || !iommu_intremap )
             iommu_intpost = false;
 #endif
 
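
The deleted comment explains the dropped "!cpu_has_cx16" leg: posted
interrupts require atomically rewriting 16-byte IRTEs, which needs
CMPXCHG16B, and this change now assumes the feature is always present,
making the check redundant. For reference, CX16 is advertised in CPUID
leaf 1, ECX bit 13 (per the Intel SDM); a user-space sketch of the
check (the helper name is mine):

    #include <cpuid.h>
    #include <stdbool.h>

    #define CX16_BIT (1u << 13)   /* CPUID.01H:ECX.CMPXCHG16B[bit 13] */

    static bool has_cx16(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
            return false;

        return ecx & CX16_BIT;
    }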