@@ -381,9 +381,18 @@ static int amd_iommu_assign_device(struct domain *d, u8 devfn,
return reassign_device(pdev->domain, d, devfn, pdev);
}
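+/*
+ * Drop the cached root page-table pointer (under the mapping lock) so
+ * that no dangling reference is left once the page-tables are freed.
+ */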
+static void amd_iommu_clear_root_pgtable(struct domain *d)
+{
+ struct domain_iommu *hd = dom_iommu(d);
+
+ spin_lock(&hd->arch.mapping_lock);
+ hd->arch.amd.root_table = NULL;
+ spin_unlock(&hd->arch.mapping_lock);
+}
+
static void amd_iommu_domain_destroy(struct domain *d)
{
- dom_iommu(d)->arch.amd.root_table = NULL;
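+ /* The root table was already cleared by amd_iommu_clear_root_pgtable(). */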
+ ASSERT(!dom_iommu(d)->arch.amd.root_table);
}
static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
@@ -565,6 +574,7 @@ static const struct iommu_ops __initconstrel _iommu_ops = {
.remove_device = amd_iommu_remove_device,
.assign_device = amd_iommu_assign_device,
.teardown = amd_iommu_domain_destroy,
+ .clear_root_pgtable = amd_iommu_clear_root_pgtable,
.map_page = amd_iommu_map_page,
.unmap_page = amd_iommu_unmap_page,
.iotlb_flush = amd_iommu_flush_iotlb_pages,
@@ -1726,6 +1726,15 @@ out:
return ret;
}
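+/*
+ * Drop the cached root page-table address (under the mapping lock) so
+ * that no dangling reference is left once the page-tables are freed.
+ */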
+static void iommu_clear_root_pgtable(struct domain *d)
+{
+ struct domain_iommu *hd = dom_iommu(d);
+
+ spin_lock(&hd->arch.mapping_lock);
+ hd->arch.vtd.pgd_maddr = 0;
+ spin_unlock(&hd->arch.mapping_lock);
+}
+
static void iommu_domain_teardown(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
@@ -1740,7 +1749,7 @@ static void iommu_domain_teardown(struct domain *d)
xfree(mrmrr);
}
- hd->arch.vtd.pgd_maddr = 0;
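+ /* pgd_maddr was already cleared by iommu_clear_root_pgtable(). */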
+ ASSERT(!hd->arch.vtd.pgd_maddr);
}
static int __must_check intel_iommu_map_page(struct domain *d, dfn_t dfn,
@@ -2731,6 +2740,7 @@ static struct iommu_ops __initdata vtd_ops = {
.remove_device = intel_iommu_remove_device,
.assign_device = intel_iommu_assign_device,
.teardown = iommu_domain_teardown,
+ .clear_root_pgtable = iommu_clear_root_pgtable,
.map_page = intel_iommu_map_page,
.unmap_page = intel_iommu_unmap_page,
.lookup_page = intel_iommu_lookup_page,
@@ -149,6 +149,13 @@ int arch_iommu_domain_init(struct domain *d)
void arch_iommu_domain_destroy(struct domain *d)
{
+ /*
+ * There should be no page-tables left allocated by the time the
+ * domain is destroyed. Note that arch_iommu_domain_destroy() is
+ * called unconditionally, so pgtables may be uninitialized.
+ */
+ ASSERT(dom_iommu(d)->platform_ops == NULL ||
+ page_list_empty(&dom_iommu(d)->arch.pgtables.list));
}
static bool __hwdom_init hwdom_iommu_map(const struct domain *d,
@@ -273,6 +280,12 @@ int iommu_free_pgtables(struct domain *d)
/* After this barrier, no more IOMMU mapping can happen */
spin_barrier(&hd->arch.mapping_lock);
+ /*
+ * The page-tables are about to be freed below, so clear the root
+ * page-table first to avoid any potential use-after-free.
+ */
+ hd->platform_ops->clear_root_pgtable(d);
+
while ( (pg = page_list_remove_head(&hd->arch.pgtables.list)) )
{
free_domheap_page(pg);
@@ -272,6 +272,7 @@ struct iommu_ops {
int (*adjust_irq_affinities)(void);
void (*sync_cache)(const void *addr, unsigned int size);
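+ /* Detach the root page-table before the page-tables are freed. */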
+ void (*clear_root_pgtable)(struct domain *d);
#endif /* CONFIG_X86 */
int __must_check (*suspend)(void);