Message ID | 20190716120056.1723-2-aisaila@bitdefender.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [v3,1/2] x86/mm: Clean IOMMU flags from p2m-pt code | expand |
On 16.07.2019 14:01, Alexandru Stefan ISAILA wrote: > At this moment IOMMU pt sharing is disabled by commit [1]. > > This patch cleans the unreachable code guarded by iommu_hap_pt_share. > > [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555 > > Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com> Reviewed-by: Jan Beulich <jbeulich@suse.com>
On Tue, Jul 16, 2019 at 12:01:15PM +0000, Alexandru Stefan ISAILA wrote: > At this moment IOMMU pt sharing is disabled by commit [1]. > > This patch cleans the unreachable code garded by iommu_hap_pt_share. > > [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555 > > Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com> Acked-by: Brian Woods <brian.woods@amd.com> > --- > xen/drivers/passthrough/amd/iommu_map.c | 28 ------------------- > xen/drivers/passthrough/amd/pci_amd_iommu.c | 4 --- > xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 3 -- > 3 files changed, 35 deletions(-) > > diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c > index cbf00e9e72..90cc7075c2 100644 > --- a/xen/drivers/passthrough/amd/iommu_map.c > +++ b/xen/drivers/passthrough/amd/iommu_map.c > @@ -364,9 +364,6 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn, > int rc; > unsigned long pt_mfn[7]; > > - if ( iommu_use_hap_pt(d) ) > - return 0; > - > memset(pt_mfn, 0, sizeof(pt_mfn)); > > spin_lock(&hd->arch.mapping_lock); > @@ -420,9 +417,6 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn, > unsigned long pt_mfn[7]; > struct domain_iommu *hd = dom_iommu(d); > > - if ( iommu_use_hap_pt(d) ) > - return 0; > - > memset(pt_mfn, 0, sizeof(pt_mfn)); > > spin_lock(&hd->arch.mapping_lock); > @@ -558,28 +552,6 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain, > return rt; > } > > -/* Share p2m table with iommu. 
*/ > -void amd_iommu_share_p2m(struct domain *d) > -{ > - struct domain_iommu *hd = dom_iommu(d); > - struct page_info *p2m_table; > - mfn_t pgd_mfn; > - > - pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))); > - p2m_table = mfn_to_page(pgd_mfn); > - > - if ( hd->arch.root_table != p2m_table ) > - { > - free_amd_iommu_pgtable(hd->arch.root_table); > - hd->arch.root_table = p2m_table; > - > - /* When sharing p2m with iommu, paging mode = 4 */ > - hd->arch.paging_mode = 4; > - AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n", > - mfn_x(pgd_mfn)); > - } > -} > - > /* > * Local variables: > * mode: C > diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c > index 4afbcd1609..be076210b6 100644 > --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c > +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c > @@ -396,9 +396,6 @@ static void deallocate_iommu_page_tables(struct domain *d) > { > struct domain_iommu *hd = dom_iommu(d); > > - if ( iommu_use_hap_pt(d) ) > - return; > - > spin_lock(&hd->arch.mapping_lock); > if ( hd->arch.root_table ) > { > @@ -566,7 +563,6 @@ static const struct iommu_ops __initconstrel _iommu_ops = { > .setup_hpet_msi = amd_setup_hpet_msi, > .suspend = amd_iommu_suspend, > .resume = amd_iommu_resume, > - .share_p2m = amd_iommu_share_p2m, > .crash_shutdown = amd_iommu_crash_shutdown, > .dump_p2m_table = amd_dump_p2m_table, > }; > diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h > index e0d5d23978..b832f564a7 100644 > --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h > +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h > @@ -66,9 +66,6 @@ int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn, > unsigned int flush_flags); > int __must_check amd_iommu_flush_iotlb_all(struct domain *d); > > -/* Share p2m table with iommu */ > -void amd_iommu_share_p2m(struct domain *d); > - > /* device table 
functions */ > int get_dma_requestor_id(uint16_t seg, uint16_t bdf); > void amd_iommu_set_intremap_table(struct amd_iommu_dte *dte, > -- > 2.17.1 >
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c index cbf00e9e72..90cc7075c2 100644 --- a/xen/drivers/passthrough/amd/iommu_map.c +++ b/xen/drivers/passthrough/amd/iommu_map.c @@ -364,9 +364,6 @@ int amd_iommu_map_page(struct domain *d, dfn_t dfn, mfn_t mfn, int rc; unsigned long pt_mfn[7]; - if ( iommu_use_hap_pt(d) ) - return 0; - memset(pt_mfn, 0, sizeof(pt_mfn)); spin_lock(&hd->arch.mapping_lock); @@ -420,9 +417,6 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn, unsigned long pt_mfn[7]; struct domain_iommu *hd = dom_iommu(d); - if ( iommu_use_hap_pt(d) ) - return 0; - memset(pt_mfn, 0, sizeof(pt_mfn)); spin_lock(&hd->arch.mapping_lock); @@ -558,28 +552,6 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain, return rt; } -/* Share p2m table with iommu. */ -void amd_iommu_share_p2m(struct domain *d) -{ - struct domain_iommu *hd = dom_iommu(d); - struct page_info *p2m_table; - mfn_t pgd_mfn; - - pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d))); - p2m_table = mfn_to_page(pgd_mfn); - - if ( hd->arch.root_table != p2m_table ) - { - free_amd_iommu_pgtable(hd->arch.root_table); - hd->arch.root_table = p2m_table; - - /* When sharing p2m with iommu, paging mode = 4 */ - hd->arch.paging_mode = 4; - AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = %#lx\n", - mfn_x(pgd_mfn)); - } -} - /* * Local variables: * mode: C diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c index 4afbcd1609..be076210b6 100644 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c @@ -396,9 +396,6 @@ static void deallocate_iommu_page_tables(struct domain *d) { struct domain_iommu *hd = dom_iommu(d); - if ( iommu_use_hap_pt(d) ) - return; - spin_lock(&hd->arch.mapping_lock); if ( hd->arch.root_table ) { @@ -566,7 +563,6 @@ static const struct iommu_ops __initconstrel _iommu_ops = { .setup_hpet_msi = 
amd_setup_hpet_msi, .suspend = amd_iommu_suspend, .resume = amd_iommu_resume, - .share_p2m = amd_iommu_share_p2m, .crash_shutdown = amd_iommu_crash_shutdown, .dump_p2m_table = amd_dump_p2m_table, }; diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h index e0d5d23978..b832f564a7 100644 --- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h +++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h @@ -66,9 +66,6 @@ int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn, unsigned int flush_flags); int __must_check amd_iommu_flush_iotlb_all(struct domain *d); -/* Share p2m table with iommu */ -void amd_iommu_share_p2m(struct domain *d); - /* device table functions */ int get_dma_requestor_id(uint16_t seg, uint16_t bdf); void amd_iommu_set_intremap_table(struct amd_iommu_dte *dte,
At this moment IOMMU pt sharing is disabled by commit [1]. This patch cleans the unreachable code guarded by iommu_hap_pt_share. [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555 Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com> --- xen/drivers/passthrough/amd/iommu_map.c | 28 ------------------- xen/drivers/passthrough/amd/pci_amd_iommu.c | 4 --- xen/include/asm-x86/hvm/svm/amd-iommu-proto.h | 3 -- 3 files changed, 35 deletions(-)