@@ -491,8 +491,8 @@ static int amd_iommu_group_id(u16 seg, u8 bus, u8 devfn)
#include <asm/io_apic.h>
-static void amd_dump_p2m_table_level(struct page_info* pg, int level,
- paddr_t gpa, int indent)
+static void amd_dump_page_table_level(struct page_info* pg, int level,
+ paddr_t gpa, int indent)
{
paddr_t address;
struct amd_iommu_pte *table_vaddr;
@@ -529,7 +529,7 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level,
address = gpa + amd_offset_level_address(index, level);
if ( pde->next_level >= 1 )
- amd_dump_p2m_table_level(
+ amd_dump_page_table_level(
mfn_to_page(_mfn(pde->mfn)), pde->next_level,
address, indent + 1);
else
@@ -542,16 +542,16 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level,
unmap_domain_page(table_vaddr);
}
-static void amd_dump_p2m_table(struct domain *d)
+static void amd_dump_page_tables(struct domain *d)
{
const struct domain_iommu *hd = dom_iommu(d);
if ( !hd->arch.amd.root_table )
return;
- printk("p2m table has %d levels\n", hd->arch.amd.paging_mode);
- amd_dump_p2m_table_level(hd->arch.amd.root_table,
- hd->arch.amd.paging_mode, 0, 0);
+ printk("AMD IOMMU table has %d levels\n", hd->arch.amd.paging_mode);
+ amd_dump_page_table_level(hd->arch.amd.root_table,
+ hd->arch.amd.paging_mode, 0, 0);
}
static const struct iommu_ops __initconstrel _iommu_ops = {
@@ -578,7 +578,7 @@ static const struct iommu_ops __initconstrel _iommu_ops = {
.suspend = amd_iommu_suspend,
.resume = amd_iommu_resume,
.crash_shutdown = amd_iommu_crash_shutdown,
- .dump_p2m_table = amd_dump_p2m_table,
+ .dump_page_tables = amd_dump_page_tables,
};
static const struct iommu_init_ops __initconstrel _iommu_init_ops = {
@@ -22,7 +22,7 @@
#include <xen/keyhandler.h>
#include <xsm/xsm.h>
-static void iommu_dump_p2m_table(unsigned char key);
+static void iommu_dump_page_tables(unsigned char key);
unsigned int __read_mostly iommu_dev_iotlb_timeout = 1000;
integer_param("iommu_dev_iotlb_timeout", iommu_dev_iotlb_timeout);
@@ -212,7 +212,8 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
if ( !is_iommu_enabled(d) )
return;
- register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
+    register_keyhandler('o', &iommu_dump_page_tables,
+                        "dump iommu page tables", 0);
hd->platform_ops->hwdom_init(d);
}
@@ -533,16 +533,12 @@ bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
return is_iommu_enabled(d) && test_bit(feature, dom_iommu(d)->features);
}
-static void iommu_dump_p2m_table(unsigned char key)
+static void iommu_dump_page_tables(unsigned char key)
{
struct domain *d;
const struct iommu_ops *ops;
- if ( !iommu_enabled )
- {
- printk("IOMMU not enabled!\n");
- return;
- }
+ ASSERT(iommu_enabled);
ops = iommu_get_ops();
@@ -553,14 +549,7 @@ static void iommu_dump_p2m_table(unsigned char key)
if ( is_hardware_domain(d) || !is_iommu_enabled(d) )
continue;
- if ( iommu_use_hap_pt(d) )
- {
- printk("\ndomain%d IOMMU p2m table shared with MMU: \n", d->domain_id);
- continue;
- }
-
- printk("\ndomain%d IOMMU p2m table: \n", d->domain_id);
- ops->dump_p2m_table(d);
+ ops->dump_page_tables(d);
}
rcu_read_unlock(&domlist_read_lock);
@@ -2582,8 +2582,8 @@ static void vtd_resume(void)
}
}
-static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
- int indent)
+static void vtd_dump_page_table_level(paddr_t pt_maddr, int level,
+                                      paddr_t gpa, int indent)
{
paddr_t address;
int i;
@@ -2612,8 +2612,8 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
address = gpa + offset_level_address(i, level);
if ( next_level >= 1 )
- vtd_dump_p2m_table_level(dma_pte_addr(*pte), next_level,
- address, indent + 1);
+ vtd_dump_page_table_level(dma_pte_addr(*pte), next_level,
+ address, indent + 1);
else
printk("%*sdfn: %08lx mfn: %08lx\n",
indent, "",
@@ -2624,17 +2624,19 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
unmap_vtd_domain_page(pt_vaddr);
}
-static void vtd_dump_p2m_table(struct domain *d)
+static void vtd_dump_page_tables(struct domain *d)
{
- const struct domain_iommu *hd;
+ const struct domain_iommu *hd = dom_iommu(d);
- if ( list_empty(&acpi_drhd_units) )
+ if ( iommu_use_hap_pt(d) )
+ {
+ printk("VT-D sharing EPT table\n");
return;
+ }
- hd = dom_iommu(d);
- printk("p2m table has %d levels\n", agaw_to_level(hd->arch.vtd.agaw));
- vtd_dump_p2m_table_level(hd->arch.vtd.pgd_maddr,
- agaw_to_level(hd->arch.vtd.agaw), 0, 0);
+ printk("VT-D table has %d levels\n", agaw_to_level(hd->arch.vtd.agaw));
+ vtd_dump_page_table_level(hd->arch.vtd.pgd_maddr,
+ agaw_to_level(hd->arch.vtd.agaw), 0, 0);
}
static int __init intel_iommu_quarantine_init(struct domain *d)
@@ -2734,7 +2736,7 @@ static struct iommu_ops __initdata vtd_ops = {
.iotlb_flush = iommu_flush_iotlb_pages,
.iotlb_flush_all = iommu_flush_iotlb_all,
.get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
- .dump_p2m_table = vtd_dump_p2m_table,
+ .dump_page_tables = vtd_dump_page_tables,
};
const struct iommu_init_ops __initconstrel intel_iommu_init_ops = {
@@ -277,7 +277,7 @@ struct iommu_ops {
unsigned int flush_flags);
int __must_check (*iotlb_flush_all)(struct domain *d);
int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
- void (*dump_p2m_table)(struct domain *d);
+ void (*dump_page_tables)(struct domain *d);
#ifdef CONFIG_HAS_DEVICE_TREE
/*