--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2211,14 +2211,35 @@ static int __must_check cf_check intel_i
 
 * While the (ab)use of PTE_kind_table here allows saving some work in
* the function, the main motivation for it is that it avoids a so far
* unexplained hang during boot (while preparing Dom0) on a Westmere
- * based laptop.
+ * based laptop. This also has the intended effect of terminating the
+ * loop once superpages are no longer supported at the next level.
*/
- pt_update_contig_markers(&page->val,
- address_level_offset(dfn_to_daddr(dfn), level),
- level,
- (hd->platform_ops->page_sizes &
- (1UL << level_to_offset_bits(level + 1))
- ? PTE_kind_leaf : PTE_kind_table));
+ while ( pt_update_contig_markers(&page->val,
+ address_level_offset(dfn_to_daddr(dfn), level),
+ level,
+ (hd->platform_ops->page_sizes &
+ (1UL << level_to_offset_bits(level + 1))
+ ? PTE_kind_leaf : PTE_kind_table)) )
+ {
+ struct page_info *pg = maddr_to_page(pg_maddr);
+
+ unmap_vtd_domain_page(page);
+
+ new.val &= ~(LEVEL_MASK << level_to_offset_bits(level));
+ dma_set_pte_superpage(new);
+
+ pg_maddr = addr_to_dma_page_maddr(d, dfn_to_daddr(dfn), ++level,
+ flush_flags, false);
+ BUG_ON(pg_maddr < PAGE_SIZE);
+
+ page = map_vtd_domain_page(pg_maddr);
+ pte = &page[address_level_offset(dfn_to_daddr(dfn), level)];
+ *pte = new;
+ iommu_sync_cache(pte, sizeof(*pte));
+
+ *flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
+ iommu_queue_free_pgtable(hd, pg);
+ }
spin_unlock(&hd->arch.mapping_lock);
unmap_vtd_domain_page(page);
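
The loop is denser than it looks: pt_update_contig_markers() returning true means the entry just written made its page table fully contiguous, so the body frees that table, moves up one level, and rewrites the parent entry as a superpage; passing PTE_kind_table whenever the next level cannot hold a superpage forces a false return and ends the climb. What follows is a minimal, self-contained sketch of that coalescing idea, not Xen code: every name in it (toy_pte_t, table_is_contiguous(), TOY_PS_BIT, and so on) is hypothetical, and where the patch tracks contiguity incrementally through markers kept in otherwise-ignored PTE bits, the sketch simply rescans the whole table.

/*
 * Toy model of one coalescing step, for illustration only.  A level-1
 * table whose 512 entries map one contiguous range is replaced by a
 * single level-2 superpage entry, and the table itself is freed.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_PTE_NUM     512           /* entries per table, like PTE_NUM */
#define TOY_STRIDE      9             /* address bits decoded per level  */
#define TOY_PAGE_SHIFT  12
#define TOY_PS_BIT      (1ULL << 7)   /* stand-in for the superpage flag */

typedef struct { uint64_t val; } toy_pte_t;

/* Size of the region a single entry maps at the given level. */
static uint64_t entry_size(unsigned int level)
{
    return 1ULL << (TOY_PAGE_SHIFT + (level - 1) * TOY_STRIDE);
}

/* True if all entries form one contiguous run starting at tbl[0]. */
static bool table_is_contiguous(const toy_pte_t *tbl, unsigned int level)
{
    for (unsigned int i = 1; i < TOY_PTE_NUM; ++i)
        if (tbl[i].val != tbl[0].val + i * entry_size(level))
            return false;
    return true;
}

int main(void)
{
    toy_pte_t *l1 = calloc(TOY_PTE_NUM, sizeof(*l1));
    toy_pte_t l2e = { 0 };            /* parent entry pointing at l1 */
    uint64_t base = 0x40000000ULL;

    if (!l1)
        return 1;

    /* Populate the level-1 table with 512 consecutive 4k mappings. */
    for (unsigned int i = 0; i < TOY_PTE_NUM; ++i)
        l1[i].val = base + ((uint64_t)i << TOY_PAGE_SHIFT);

    if (table_is_contiguous(l1, 1)) {
        /* Rewrite the parent as a 2M superpage; the table is now dead. */
        l2e.val = base | TOY_PS_BIT;
        free(l1);
        printf("coalesced into superpage entry %#llx\n",
               (unsigned long long)l2e.val);
    }
    return 0;
}

In the patched function the same test then repeats one level up, so a level-2 table filled with 2M superpage entries can itself collapse into a 1G mapping, and so on for as long as the hardware advertises the larger page size.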

--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -232,7 +232,7 @@ struct context_entry {
 
/* page table handling */
#define LEVEL_STRIDE (9)
-#define LEVEL_MASK ((1 << LEVEL_STRIDE) - 1)
+#define LEVEL_MASK (PTE_NUM - 1UL)
#define PTE_NUM (1 << LEVEL_STRIDE)
#define level_to_agaw(val) ((val) - 2)
#define agaw_to_level(val) ((val) + 2)
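
The LEVEL_MASK hunk is what keeps the new code's new.val &= ~(LEVEL_MASK << level_to_offset_bits(level)) well-defined: the old mask had type int, and at level 3 the shift count reaches 30, pushing the 9-bit field across bit 31 of a 32-bit value, while at level 4 the count of 39 exceeds the width of int outright; both are undefined behaviour. As PTE_NUM - 1UL the mask is unsigned long, 64 bits in Xen's x86 builds, so every level's shift stays in range. A standalone illustration, assuming level_to_offset_bits() has the conventional 12 + (level - 1) * LEVEL_STRIDE shape:

/* Standalone illustration (not Xen code) of why the mask must be wide. */
#include <stdio.h>

#define LEVEL_STRIDE 9
#define PTE_NUM      (1 << LEVEL_STRIDE)
#define LEVEL_MASK   (PTE_NUM - 1UL)   /* unsigned long, as in the patch */
/* Assumed definition: 4k pages, 9 bits translated per level. */
#define level_to_offset_bits(lvl) (12 + ((lvl) - 1) * LEVEL_STRIDE)

int main(void)
{
    for (unsigned int level = 1; level <= 4; ++level) {
        /*
         * With the old int-typed mask, level 3 would shift the field
         * into bit 38 of a 32-bit value and level 4 would shift by 39
         * bits, both undefined; the unsigned long mask stays in range
         * (assuming a 64-bit long, as on x86-64).
         */
        unsigned long field = LEVEL_MASK << level_to_offset_bits(level);
        printf("level %u: address bits %#018lx\n", level, field);
    }
    return 0;
}

Defining LEVEL_MASK in terms of PTE_NUM also removes the duplicated (1 << LEVEL_STRIDE) expression, though the type change is the substantive part.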