@@ -258,7 +258,6 @@ int cf_check amd_iommu_get_reserved_devi
 int __must_check cf_check amd_iommu_flush_iotlb_pages(
     struct domain *d, dfn_t dfn, unsigned long page_count,
     unsigned int flush_flags);
-int __must_check cf_check amd_iommu_flush_iotlb_all(struct domain *d);
 void amd_iommu_print_entries(const struct amd_iommu *iommu, unsigned int dev_id,
                              dfn_t dfn);
@@ -539,15 +539,18 @@ int cf_check amd_iommu_flush_iotlb_pages
 {
     unsigned long dfn_l = dfn_x(dfn);
 
-    ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
-    ASSERT(flush_flags);
+    if ( !(flush_flags & IOMMU_FLUSHF_all) )
+    {
+        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+        ASSERT(flush_flags);
+    }
 
     /* Unless a PTE was modified, no flush is required */
     if ( !(flush_flags & IOMMU_FLUSHF_modified) )
         return 0;
 
-    /* If the range wraps then just flush everything */
-    if ( dfn_l + page_count < dfn_l )
+    /* If so requested or if the range wraps then just flush everything. */
+    if ( (flush_flags & IOMMU_FLUSHF_all) || dfn_l + page_count < dfn_l )
     {
         amd_iommu_flush_all_pages(d);
         return 0;
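(Aside, for readers tracing the AMD hunk above: the added dispatch boils down to "no flush unless something was modified; full flush if IOMMU_FLUSHF_all is set or the range wraps; otherwise a ranged flush". A minimal standalone C sketch of that logic follows. It is illustrative only and not part of the patch; flush_all(), flush_range() and the simplified flag values are hypothetical stand-ins, not the driver's real primitives.)

#include <stdio.h>

#define IOMMU_FLUSHF_modified (1u << 1)
#define IOMMU_FLUSHF_all      (1u << 2)

/* Stand-ins for a driver's real invalidation primitives. */
static void flush_all(void)
{
    puts("full IOTLB flush");
}

static void flush_range(unsigned long dfn, unsigned long count)
{
    printf("flush %lu page(s) starting at dfn %#lx\n", count, dfn);
}

static int iotlb_flush(unsigned long dfn, unsigned long count,
                       unsigned int flush_flags)
{
    /* No PTE was modified: nothing to invalidate. */
    if ( !(flush_flags & IOMMU_FLUSHF_modified) )
        return 0;

    /* Full flush requested, or the range wraps: flush everything. */
    if ( (flush_flags & IOMMU_FLUSHF_all) || dfn + count < dfn )
    {
        flush_all();
        return 0;
    }

    flush_range(dfn, count);
    return 0;
}

int main(void)
{
    iotlb_flush(0x1000, 4, IOMMU_FLUSHF_modified);                /* ranged */
    iotlb_flush(0, 0, IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all);  /* full   */
    return 0;
}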
@@ -572,13 +575,6 @@ int cf_check amd_iommu_flush_iotlb_pages
     return 0;
 }
-
-int cf_check amd_iommu_flush_iotlb_all(struct domain *d)
-{
-    amd_iommu_flush_all_pages(d);
-
-    return 0;
-}
 
 int amd_iommu_reserve_domain_unity_map(struct domain *d,
                                        const struct ivrs_unity_map *map,
                                        unsigned int flag)
@@ -759,7 +759,6 @@ static const struct iommu_ops __initcons
     .map_page = amd_iommu_map_page,
     .unmap_page = amd_iommu_unmap_page,
     .iotlb_flush = amd_iommu_flush_iotlb_pages,
-    .iotlb_flush_all = amd_iommu_flush_iotlb_all,
     .reassign_device = reassign_device,
     .get_device_group_id = amd_iommu_group_id,
     .enable_x2apic = iov_enable_xt,
@@ -1000,13 +1000,19 @@ out:
 }
 
 /* Xen IOMMU ops */
-static int __must_check ipmmu_iotlb_flush_all(struct domain *d)
+static int __must_check ipmmu_iotlb_flush(struct domain *d, dfn_t dfn,
+                                          unsigned long page_count,
+                                          unsigned int flush_flags)
 {
     struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv;
 
+    ASSERT(flush_flags);
+
     if ( !xen_domain || !xen_domain->root_domain )
         return 0;
 
+    /* The hardware doesn't support selective TLB flush. */
+
     spin_lock(&xen_domain->lock);
     ipmmu_tlb_invalidate(xen_domain->root_domain);
     spin_unlock(&xen_domain->lock);
@@ -1014,16 +1020,6 @@ static int __must_check ipmmu_iotlb_flus
     return 0;
 }
 
-static int __must_check ipmmu_iotlb_flush(struct domain *d, dfn_t dfn,
-                                          unsigned long page_count,
-                                          unsigned int flush_flags)
-{
-    ASSERT(flush_flags);
-
-    /* The hardware doesn't support selective TLB flush. */
-    return ipmmu_iotlb_flush_all(d);
-}
-
 static struct ipmmu_vmsa_domain *ipmmu_get_cache_domain(struct domain *d,
                                                         struct device *dev)
 {
@@ -1360,7 +1356,6 @@ static const struct iommu_ops ipmmu_iomm
     .hwdom_init = arch_iommu_hwdom_init,
     .teardown = ipmmu_iommu_domain_teardown,
     .iotlb_flush = ipmmu_iotlb_flush,
-    .iotlb_flush_all = ipmmu_iotlb_flush_all,
     .assign_device = ipmmu_assign_device,
     .reassign_device = ipmmu_reassign_device,
     .map_page = arm_iommu_map_page,
@@ -2649,11 +2649,17 @@ static int force_stage = 2;
  */
 static u32 platform_features = ARM_SMMU_FEAT_COHERENT_WALK;
 
-static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
+static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
+                                             unsigned long page_count,
+                                             unsigned int flush_flags)
 {
 	struct arm_smmu_xen_domain *smmu_domain = dom_iommu(d)->arch.priv;
 	struct iommu_domain *cfg;
 
+	ASSERT(flush_flags);
+
+	/* ARM SMMU v1 doesn't have flush by VMA and VMID */
+
 	spin_lock(&smmu_domain->lock);
 	list_for_each_entry(cfg, &smmu_domain->contexts, list) {
 		/*
@@ -2670,16 +2676,6 @@ static int __must_check arm_smmu_iotlb_f
 	return 0;
 }
 
-static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
-                                             unsigned long page_count,
-                                             unsigned int flush_flags)
-{
-	ASSERT(flush_flags);
-
-	/* ARM SMMU v1 doesn't have flush by VMA and VMID */
-	return arm_smmu_iotlb_flush_all(d);
-}
-
 static struct iommu_domain *arm_smmu_get_domain(struct domain *d,
                                                 struct device *dev)
 {
@@ -2864,7 +2860,6 @@ static const struct iommu_ops arm_smmu_i
     .add_device = arm_smmu_dt_add_device_generic,
     .teardown = arm_smmu_iommu_domain_teardown,
     .iotlb_flush = arm_smmu_iotlb_flush,
-    .iotlb_flush_all = arm_smmu_iotlb_flush_all,
     .assign_device = arm_smmu_assign_dev,
     .reassign_device = arm_smmu_reassign_dev,
     .map_page = arm_iommu_map_page,
@@ -3416,7 +3416,6 @@ static const struct iommu_ops arm_smmu_i
     .hwdom_init = arch_iommu_hwdom_init,
     .teardown = arm_smmu_iommu_xen_domain_teardown,
     .iotlb_flush = arm_smmu_iotlb_flush,
-    .iotlb_flush_all = arm_smmu_iotlb_flush_all,
     .assign_device = arm_smmu_assign_dev,
     .reassign_device = arm_smmu_reassign_dev,
     .map_page = arm_iommu_map_page,
@@ -478,15 +478,12 @@ int iommu_iotlb_flush_all(struct domain
     const struct domain_iommu *hd = dom_iommu(d);
     int rc;
 
-    if ( !is_iommu_enabled(d) || !hd->platform_ops->iotlb_flush_all ||
+    if ( !is_iommu_enabled(d) || !hd->platform_ops->iotlb_flush ||
          !flush_flags )
         return 0;
 
-    /*
-     * The operation does a full flush so we don't need to pass the
-     * flush_flags in.
-     */
-    rc = iommu_call(hd->platform_ops, iotlb_flush_all, d);
+    rc = iommu_call(hd->platform_ops, iotlb_flush, d, INVALID_DFN, 0,
+                    flush_flags | IOMMU_FLUSHF_all);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
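(Aside: on the common-code side, a "flush everything" request is now just a call to the single iotlb_flush hook with INVALID_DFN, a zero page count, and IOMMU_FLUSHF_all or'ed into the flags, as the hunk above shows. The sketch below models that shape in standalone C with simplified, hypothetical types; the toy struct iommu_ops, example_iotlb_flush() and the ~0UL INVALID_DFN are assumptions for illustration, not the real Xen definitions.)

#include <stdio.h>

#define IOMMU_FLUSHF_modified (1u << 1)
#define IOMMU_FLUSHF_all      (1u << 2)
#define INVALID_DFN           (~0UL)

struct iommu_ops {
    /* The single remaining hook; it now covers "flush everything" too. */
    int (*iotlb_flush)(unsigned long dfn, unsigned long page_count,
                       unsigned int flush_flags);
};

static int example_iotlb_flush(unsigned long dfn, unsigned long page_count,
                               unsigned int flush_flags)
{
    if ( flush_flags & IOMMU_FLUSHF_all )
        puts("flush entire IOTLB");
    else
        printf("flush %lu page(s) at dfn %#lx\n", page_count, dfn);
    return 0;
}

static const struct iommu_ops ops = { .iotlb_flush = example_iotlb_flush };

/* What used to need a dedicated iotlb_flush_all hook is now just this call. */
static int flush_everything(unsigned int flush_flags)
{
    return ops.iotlb_flush(INVALID_DFN, 0, flush_flags | IOMMU_FLUSHF_all);
}

int main(void)
{
    ops.iotlb_flush(0x2000, 8, IOMMU_FLUSHF_modified);
    return flush_everything(IOMMU_FLUSHF_modified);
}

One benefit of this shape, visible in the ARM hunks below, is that drivers whose hardware cannot flush selectively no longer need a trivial wrapper that ignores its arguments and forwards to a separate flush-all handler.)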
@@ -772,18 +772,21 @@ static int __must_check cf_check iommu_f
     struct domain *d, dfn_t dfn, unsigned long page_count,
     unsigned int flush_flags)
 {
-    ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
-    ASSERT(flush_flags);
+    if ( flush_flags & IOMMU_FLUSHF_all )
+    {
+        dfn = INVALID_DFN;
+        page_count = 0;
+    }
+    else
+    {
+        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+        ASSERT(flush_flags);
+    }
 
     return iommu_flush_iotlb(d, dfn, flush_flags & IOMMU_FLUSHF_modified,
                              page_count);
 }
 
-static int __must_check cf_check iommu_flush_iotlb_all(struct domain *d)
-{
-    return iommu_flush_iotlb(d, INVALID_DFN, 0, 0);
-}
-
 static void queue_free_pt(struct domain_iommu *hd, mfn_t mfn, unsigned int level)
 {
     if ( level > 1 )
@@ -3185,7 +3188,6 @@ static const struct iommu_ops __initcons
     .resume = vtd_resume,
     .crash_shutdown = vtd_crash_shutdown,
     .iotlb_flush = iommu_flush_iotlb_pages,
-    .iotlb_flush_all = iommu_flush_iotlb_all,
     .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
     .dump_page_tables = vtd_dump_page_tables,
 };
@@ -147,9 +147,11 @@ enum
 {
     _IOMMU_FLUSHF_added,
     _IOMMU_FLUSHF_modified,
+    _IOMMU_FLUSHF_all,
 };
 #define IOMMU_FLUSHF_added (1u << _IOMMU_FLUSHF_added)
 #define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
+#define IOMMU_FLUSHF_all (1u << _IOMMU_FLUSHF_all)
 
 int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                            unsigned long page_count, unsigned int flags,
@@ -281,7 +283,6 @@ struct iommu_ops {
     int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
                                     unsigned long page_count,
                                     unsigned int flush_flags);
-    int __must_check (*iotlb_flush_all)(struct domain *d);
     int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
     void (*dump_page_tables)(struct domain *d);