@@ -125,4 +125,7 @@ PERFCOUNTER(realmode_exits, "vmexit
PERFCOUNTER(pauseloop_exits, "vmexits from Pause-Loop Detection")
+PERFCOUNTER(iommu_pt_shatters, "IOMMU page table shatters")
+PERFCOUNTER(iommu_pt_coalesces, "IOMMU page table coalesces")
+
/*#endif*/ /* __XEN_PERFC_DEFN_H__ */
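The two counters above follow Xen's usual perf-counter pattern: PERFCOUNTER()
in perfc_defn.h declares a per-CPU counter together with its description
string, and the hunks below bump it with perfc_incr() at the affected call
sites. With CONFIG_PERF_COUNTERS disabled the increments compile away, so the
hot paths are unchanged in release builds; with it enabled the totals can be
read from the running system (e.g. via the 'p' debug key or the xenperf tool
in dom0). A minimal sketch of the pattern, where example_shatter_path() is a
made-up stand-in for the real call sites, not code from this patch:

    /*
     * Minimal sketch of the perf-counter pattern this patch relies on.
     * example_shatter_path() is a made-up stand-in for the real call sites.
     */
    PERFCOUNTER(iommu_pt_shatters, "IOMMU page table shatters")  /* in perfc_defn.h */

    static void example_shatter_path(void)
    {
        /* ... a superpage mapping was just split into a lower-level table ... */
        perfc_incr(iommu_pt_shatters);  /* compiles to nothing without CONFIG_PERF_COUNTERS */
    }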
@@ -345,6 +345,8 @@ static int iommu_pde_from_dfn(struct dom
level, PTE_kind_table);
*flush_flags |= IOMMU_FLUSHF_modified;
+
+ perfc_incr(iommu_pt_shatters);
}
/* Install lower level page table for non-present entries */
@@ -477,6 +479,7 @@ int cf_check amd_iommu_map_page(
flags & IOMMUF_readable, &contig);
*flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
iommu_queue_free_pgtable(hd, pg);
+ perfc_incr(iommu_pt_coalesces);
}
spin_unlock(&hd->arch.mapping_lock);
@@ -543,6 +546,7 @@ int cf_check amd_iommu_unmap_page(
clear_iommu_pte_present(pt_mfn, dfn_x(dfn), level, &free);
*flush_flags |= IOMMU_FLUSHF_all;
iommu_queue_free_pgtable(hd, pg);
+ perfc_incr(iommu_pt_coalesces);
}
}
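In the AMD hunks above, iommu_pt_shatters counts the case in
iommu_pde_from_dfn() where a superpage entry has to be split and replaced by
a freshly installed lower-level table (hence the IOMMU_FLUSHF_modified right
before the increment), while iommu_pt_coalesces counts the opposite event in
amd_iommu_map_page() and amd_iommu_unmap_page(): a lower-level table whose
entries have just become uniform (all covering one contiguous range with the
same flags, or all non-present) is superseded by a single larger entry and
queued for freeing. A rough sketch of that coalescing walk, where
uniform_at_level(), install_superpage_entry(), table_to_page() and
parent_table() are hypothetical helpers used only for illustration:

    /*
     * Rough sketch of the coalescing walk the counter is attached to.
     * uniform_at_level(), install_superpage_entry(), table_to_page() and
     * parent_table() are hypothetical helpers for illustration only;
     * iommu_queue_free_pgtable() and perfc_incr() are the real calls above.
     */
    while ( uniform_at_level(table, level) && level < max_level )
    {
        struct page_info *pg = table_to_page(table);  /* table just made redundant */

        ++level;
        install_superpage_entry(dfn, level);  /* one larger entry now covers the range */
        iommu_queue_free_pgtable(hd, pg);     /* defer freeing of the old table */
        perfc_incr(iommu_pt_coalesces);       /* one increment per table freed */

        table = parent_table(table);
    }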
@@ -404,6 +404,8 @@ static uint64_t addr_to_dma_page_maddr(s
if ( flush_flags )
*flush_flags |= IOMMU_FLUSHF_modified;
+
+ perfc_incr(iommu_pt_shatters);
}
write_atomic(&pte->val, new_pte.val);
@@ -857,6 +859,7 @@ static int dma_pte_clear_one(struct doma
*flush_flags |= IOMMU_FLUSHF_all;
iommu_queue_free_pgtable(hd, pg);
+ perfc_incr(iommu_pt_coalesces);
}
spin_unlock(&hd->arch.mapping_lock);
@@ -2239,6 +2242,7 @@ static int __must_check cf_check intel_i
*flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
iommu_queue_free_pgtable(hd, pg);
+ perfc_incr(iommu_pt_coalesces);
}
spin_unlock(&hd->arch.mapping_lock);
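The VT-d hunks mirror the AMD ones: addr_to_dma_page_maddr() counts a shatter
when it splits a superpage PTE into a newly allocated next-level table, and
dma_pte_clear_one() plus intel_iommu_map_page() count a coalesce wherever a
page table has just been replaced by a single larger PTE and handed to
iommu_queue_free_pgtable(). Both drivers increment the same two counters, so
the reported totals aggregate AMD and VT-d events rather than splitting them
per vendor. For completeness, a rough sketch of the shatter side, with
pte_is_superpage() and split_superpage_into_table() as hypothetical helpers:

    /*
     * Rough sketch of the shatter side, mirroring the hunks in
     * iommu_pde_from_dfn() and addr_to_dma_page_maddr() above.
     * pte_is_superpage() and split_superpage_into_table() are hypothetical
     * helpers for illustration only.
     */
    if ( pte_is_superpage(*pte) && target_level < level )
    {
        split_superpage_into_table(pte, level);    /* replicate the mapping one level down */

        if ( flush_flags )
            *flush_flags |= IOMMU_FLUSHF_modified; /* an existing mapping was changed */

        perfc_incr(iommu_pt_shatters);             /* one increment per split entry */
    }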