@@ -285,6 +285,18 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn,
spin_lock(&hd->arch.mapping_lock);
+ /*
+ * IOMMU mapping requests can be safely ignored when the domain is dying.
+ *
+ * hd->arch.mapping_lock guarantees that d->is_dying will be observed
+ * before any page tables are freed (see iommu_free_pgtables()).
+ */
+ if ( d->is_dying )
+ {
+ spin_unlock(&hd->arch.mapping_lock);
+ return 0;
+ }
+
rc = amd_iommu_alloc_root(d);
if ( rc )
{
@@ -1762,6 +1762,18 @@ static int __must_check intel_iommu_map_page(struct domain *d, dfn_t dfn,
spin_lock(&hd->arch.mapping_lock);
+ /*
+ * IOMMU mapping requests can be safely ignored when the domain is dying.
+ *
+ * hd->arch.mapping_lock guarantees that d->is_dying will be observed
+ * before any page tables are freed (see iommu_free_pgtables()).
+ */
+ if ( d->is_dying )
+ {
+ spin_unlock(&hd->arch.mapping_lock);
+ return 0;
+ }
+
pg_maddr = addr_to_dma_page_maddr(d, dfn_to_daddr(dfn), 1);
if ( !pg_maddr )
{
@@ -267,6 +267,12 @@ int iommu_free_pgtables(struct domain *d)
struct page_info *pg;
unsigned int done = 0;
+ if ( !is_iommu_enabled(d) )
+ return 0;
+
+ /* After this barrier, no new IOMMU mappings can be inserted. */
+ spin_barrier(&hd->arch.mapping_lock);
+
while ( (pg = page_list_remove_head(&hd->arch.pgtables.list)) )
{
free_domheap_page(pg);
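
For readers unfamiliar with spin_barrier(), the following stand-alone sketch (plain C with pthreads, not part of the patch; the names map_page, free_pgtables, page_table and the dying flag are purely illustrative) models the pattern the hunks above rely on: the mapping path checks the dying flag while holding mapping_lock and backs off, and the teardown path sets the flag and then takes and drops the same lock, so any mapper already inside the critical section finishes before the page tables are freed and any later mapper observes the flag.

/*
 * Minimal sketch of the "check flag under lock, then barrier before free"
 * pattern, assuming a pthread mutex stands in for hd->arch.mapping_lock,
 * "dying" for d->is_dying, and lock+unlock for spin_barrier().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t mapping_lock = PTHREAD_MUTEX_INITIALIZER;
static bool dying;                 /* set once, never cleared */
static int *page_table;            /* stands in for the IOMMU page tables */

/* Mapping path: bail out under the lock once teardown has started. */
static int map_page(unsigned int idx, int val)
{
    int rc = 0;

    pthread_mutex_lock(&mapping_lock);

    if ( dying )
    {
        /* Safe to ignore the request: teardown will discard everything. */
        pthread_mutex_unlock(&mapping_lock);
        return 0;
    }

    if ( !page_table )
        page_table = calloc(16, sizeof(*page_table));
    if ( !page_table )
        rc = -1;
    else
        page_table[idx % 16] = val;

    pthread_mutex_unlock(&mapping_lock);
    return rc;
}

/* Teardown path: publish "dying", drain mappers, then free the tables. */
static void free_pgtables(void)
{
    dying = true;

    /*
     * Poor man's spin_barrier(): once the lock has been taken and dropped,
     * every mapper that entered before "dying" was visible has finished,
     * and every later mapper will see dying == true and back off, so the
     * free below cannot race with a concurrent map.
     */
    pthread_mutex_lock(&mapping_lock);
    pthread_mutex_unlock(&mapping_lock);

    free(page_table);
    page_table = NULL;
}

int main(void)
{
    map_page(3, 42);
    free_pgtables();
    /* A late request is silently ignored instead of touching freed memory. */
    map_page(5, 7);
    printf("done\n");
    return 0;
}

The reason a single acquire/release of mapping_lock is enough here is the same as in the patch: the teardown path never needs to wait for future mappers, only for those already holding the lock, and the lock's release/acquire ordering guarantees every subsequent mapper sees the dying flag before it can walk or allocate page tables.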