@@ -81,7 +81,8 @@ static union amd_iommu_pte set_iommu_pte
                                                  unsigned long dfn,
                                                  unsigned long next_mfn,
                                                  unsigned int level,
-                                                 bool iw, bool ir)
+                                                 bool iw, bool ir,
+                                                 bool *contig)
 {
     union amd_iommu_pte *table, *pde, old;
@@ -94,11 +95,15 @@ static union amd_iommu_pte set_iommu_pte
          old.iw != iw || old.ir != ir )
     {
         set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
-        pt_update_contig_markers(&table->raw, pfn_to_pde_idx(dfn, level),
-                                 level, PTE_kind_leaf);
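+        /*
+         * pt_update_contig_markers() reports whether this page table has now
+         * become a single all-contiguous mapping; pass that on so the caller
+         * can replace the table by a superpage.
+         */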
+        *contig = pt_update_contig_markers(&table->raw,
+                                           pfn_to_pde_idx(dfn, level),
+                                           level, PTE_kind_leaf);
     }
     else
+    {
         old.pr = false; /* signal "no change" to the caller */
+        *contig = false;
+    }
 
     unmap_domain_page(table);
@@ -409,6 +414,7 @@ int cf_check amd_iommu_map_page(
 {
     struct domain_iommu *hd = dom_iommu(d);
     unsigned int level = (IOMMUF_order(flags) / PTE_PER_TABLE_SHIFT) + 1;
+    bool contig;
     int rc;
     unsigned long pt_mfn = 0;
     union amd_iommu_pte old;
@@ -452,8 +458,26 @@ int cf_check amd_iommu_map_page(
     /* Install mapping */
     old = set_iommu_pte_present(pt_mfn, dfn_x(dfn), mfn_x(mfn), level,
-                                (flags & IOMMUF_writable),
-                                (flags & IOMMUF_readable));
+                                flags & IOMMUF_writable,
+                                flags & IOMMUF_readable, &contig);
+
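+    /*
+     * If installing the PTE made its page table all-contiguous, replace the
+     * table by a superpage mapping one level up and free the table, repeating
+     * for as long as the containing tables become all-contiguous in turn.
+     */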
+    while ( unlikely(contig) && ++level < hd->arch.amd.paging_mode )
+    {
+        struct page_info *pg = mfn_to_page(_mfn(pt_mfn));
+        unsigned long next_mfn;
+
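+        /*
+         * Find the (already existing) table one level up; the earlier,
+         * deeper walk guarantees it is there, hence the BUG checks.
+         */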
+        if ( iommu_pde_from_dfn(d, dfn_x(dfn), level, &pt_mfn, flush_flags,
+                                false) )
+            BUG();
+        BUG_ON(!pt_mfn);
+
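+        /* Align the MFN down to the boundary of the superpage being formed. */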
+        next_mfn = mfn_x(mfn) & (~0UL << (PTE_PER_TABLE_SHIFT * (level - 1)));
+        set_iommu_pte_present(pt_mfn, dfn_x(dfn), next_mfn, level,
+                              flags & IOMMUF_writable,
+                              flags & IOMMUF_readable, &contig);
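+        /* The replaced table covered more than just this DFN; flush it all. */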
+        *flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
+        iommu_queue_free_pgtable(hd, pg);
+    }
 
     spin_unlock(&hd->arch.mapping_lock);