Message ID | 20190716120056.1723-1-aisaila@bitdefender.com (mailing list archive)
---|---
State | New, archived
Series | [v3,1/2] x86/mm: Clean IOMMU flags from p2m-pt code
On 16.07.2019 14:01, Alexandru Stefan ISAILA wrote:
> At this moment IOMMU pt sharing is disabled by commit [1].
>
> This patch aims to clear the IOMMU hap share support as it will not be
> used in the future. By doing this the IOMMU bits used in pte[52:58] can
> be used in other ways.
>
> [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
>
> Suggested-by: George Dunlap <george.dunlap@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>
On Tue, Jul 16, 2019 at 12:01:11PM +0000, Alexandru Stefan ISAILA wrote:
> At this moment IOMMU pt sharing is disabled by commit [1].
>
> This patch aims to clear the IOMMU hap share support as it will not be
> used in the future. By doing this the IOMMU bits used in pte[52:58] can
> be used in other ways.
>
> [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
>
> Suggested-by: George Dunlap <george.dunlap@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Acked-by: Brian Woods <brian.woods@amd.com>

> ---
> Changes since V1:
>  - Rework commit message
>  - Reflow comments
>  - Move flags init to declaration in p2m_type_to_flags.
> ---
>  xen/arch/x86/mm/p2m-pt.c | 96 +++-------------------------------------
>  1 file changed, 5 insertions(+), 91 deletions(-)
> [...]
Hi George,

Did you get a chance to look at this clean-up?

Thanks,
Alex

On 16.07.2019 15:01, Alexandru Stefan ISAILA wrote:
> At this moment IOMMU pt sharing is disabled by commit [1].
>
> This patch aims to clear the IOMMU hap share support as it will not be
> used in the future. By doing this the IOMMU bits used in pte[52:58] can
> be used in other ways.
>
> [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
>
> Suggested-by: George Dunlap <george.dunlap@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> [...]
On 7/16/19 1:01 PM, Alexandru Stefan ISAILA wrote:
> At this moment IOMMU pt sharing is disabled by commit [1].
>
> This patch aims to clear the IOMMU hap share support as it will not be
> used in the future. By doing this the IOMMU bits used in pte[52:58] can
> be used in other ways.
>
> [1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555
>
> Suggested-by: George Dunlap <george.dunlap@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Sorry for the delay:

Reviewed-by: George Dunlap <george.dunlap@citrix.com>
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index cafc9f299b..3a0a500d66 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -24,7 +24,6 @@
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <xen/iommu.h>
 #include <xen/vm_event.h>
 #include <xen/event.h>
 #include <xen/trace.h>
@@ -36,15 +35,13 @@
 #include <asm/p2m.h>
 #include <asm/mem_sharing.h>
 #include <asm/hvm/nestedhvm.h>
-#include <asm/hvm/svm/amd-iommu-proto.h>
 
 #include "mm-locks.h"
 
 /*
  * We may store INVALID_MFN in PTEs. We need to clip this to avoid trampling
- * over higher-order bits (NX, p2m type, IOMMU flags). We seem to not need
- * to unclip on the read path, as callers are concerned only with p2m type in
- * such cases.
+ * over higher-order bits (NX, p2m type). We seem to not need to unclip on the
+ * read path, as callers are concerned only with p2m type in such cases.
  */
 #define p2m_l1e_from_pfn(pfn, flags)    \
     l1e_from_pfn((pfn) & (PADDR_MASK >> PAGE_SHIFT), (flags))
@@ -71,13 +68,7 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
                                        mfn_t mfn,
                                        unsigned int level)
 {
-    unsigned long flags;
-    /*
-     * AMD IOMMU: When we share p2m table with iommu, bit 9 - bit 11 will be
-     * used for iommu hardware to encode next io page level. Bit 59 - bit 62
-     * are used for iommu flags, We could not use these bits to store p2m types.
-     */
-    flags = (unsigned long)(t & 0x7f) << 12;
+    unsigned long flags = (unsigned long)(t & 0x7f) << 12;
 
     switch(t)
     {
@@ -165,16 +156,6 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
 // Returns 0 on error.
 //
 
-/* AMD IOMMU: Convert next level bits and r/w bits into 24 bits p2m flags */
-#define iommu_nlevel_to_flags(nl, f) ((((nl) & 0x7) << 9 )|(((f) & 0x3) << 21))
-
-static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
-                                unsigned int nlevel, unsigned int flags)
-{
-    if ( iommu_hap_pt_share )
-        l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
-}
-
 /* Returns: 0 for success, -errno for failure */
 static int
 p2m_next_level(struct p2m_domain *p2m, void **table,
@@ -203,7 +184,6 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
 
-        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
         if ( rc )
             goto error;
@@ -242,13 +222,6 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
 
         l1_entry = map_domain_page(mfn);
 
-        /* Inherit original IOMMU permissions, but update Next Level. */
-        if ( iommu_hap_pt_share )
-        {
-            flags &= ~iommu_nlevel_to_flags(~0, 0);
-            flags |= iommu_nlevel_to_flags(level - 1, 0);
-        }
-
         for ( i = 0; i < (1u << PAGETABLE_ORDER); i++ )
         {
             new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
@@ -264,8 +237,6 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         unmap_domain_page(l1_entry);
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
-        p2m_add_iommu_flags(&new_entry, level,
-                            IOMMUF_readable|IOMMUF_writable);
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,
                                   level + 1);
         if ( rc )
@@ -470,9 +441,6 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
             }
 
             e = l1e_from_pfn(mfn, flags);
-            p2m_add_iommu_flags(&e, level,
-                                (nt == p2m_ram_rw)
-                                ? IOMMUF_readable|IOMMUF_writable : 0);
             ASSERT(!needs_recalc(l1, e));
         }
         else
@@ -540,18 +508,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
     l2_pgentry_t l2e_content;
     l3_pgentry_t l3e_content;
     int rc;
-    unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt, mfn);
-    /*
-     * old_mfn and iommu_old_flags control possible flush/update needs on the
-     * IOMMU: We need to flush when MFN or flags (i.e. permissions) change.
-     * iommu_old_flags being initialized to zero covers the case of the entry
-     * getting replaced being a non-present (leaf or intermediate) one. For
-     * present leaf entries the real value will get calculated below, while
-     * for present intermediate entries ~0 (guaranteed != iommu_pte_flags)
-     * will be used (to cover all cases of what the leaf entries underneath
-     * the intermediate one might be).
-     */
-    unsigned int flags, iommu_old_flags = 0;
+    unsigned int flags;
     unsigned long old_mfn = mfn_x(INVALID_MFN);
 
     if ( !sve )
@@ -599,17 +556,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         if ( flags & _PAGE_PRESENT )
         {
             if ( flags & _PAGE_PSE )
-            {
                 old_mfn = l1e_get_pfn(*p2m_entry);
-                iommu_old_flags =
-                    p2m_get_iommu_flags(p2m_flags_to_type(flags),
-                                        _mfn(old_mfn));
-            }
             else
-            {
-                iommu_old_flags = ~0;
                 intermediate_entry = *p2m_entry;
-            }
         }
 
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
@@ -619,9 +568,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
                              : l3e_empty();
         entry_content.l1 = l3e_content.l3;
 
-        if ( entry_content.l1 != 0 )
-            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -648,9 +594,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
                                    0, L1_PAGETABLE_ENTRIES);
         ASSERT(p2m_entry);
         old_mfn = l1e_get_pfn(*p2m_entry);
-        iommu_old_flags =
-            p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)),
-                                _mfn(old_mfn));
 
         if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
@@ -658,9 +601,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         else
             entry_content = l1e_empty();
 
-        if ( entry_content.l1 != 0 )
-            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
         /* level 1 entry */
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -677,17 +617,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         if ( flags & _PAGE_PRESENT )
        {
             if ( flags & _PAGE_PSE )
-            {
                 old_mfn = l1e_get_pfn(*p2m_entry);
-                iommu_old_flags =
-                    p2m_get_iommu_flags(p2m_flags_to_type(flags),
-                                        _mfn(old_mfn));
-            }
             else
-            {
-                iommu_old_flags = ~0;
                 intermediate_entry = *p2m_entry;
-            }
         }
 
         check_entry(mfn, p2mt, p2m_flags_to_type(flags), page_order);
@@ -697,9 +629,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
                              : l2e_empty();
         entry_content.l1 = l2e_content.l2;
 
-        if ( entry_content.l1 != 0 )
-            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
-
         rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
         if ( rc )
@@ -711,24 +640,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
          && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
         p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
 
-    if ( iommu_enabled && (iommu_old_flags != iommu_pte_flags ||
-                           old_mfn != mfn_x(mfn)) )
-    {
-        ASSERT(rc == 0);
-
-        if ( need_iommu_pt_sync(p2m->domain) )
-            rc = iommu_pte_flags ?
-                 iommu_legacy_map(d, _dfn(gfn), mfn, page_order,
-                                  iommu_pte_flags) :
-                 iommu_legacy_unmap(d, _dfn(gfn), page_order);
-        else if ( iommu_use_hap_pt(d) && iommu_old_flags )
-            amd_iommu_flush_pages(p2m->domain, gfn, page_order);
-    }
-
     /*
      * Free old intermediate tables if necessary. This has to be the
-     * last thing we do, after removal from the IOMMU tables, so as to
-     * avoid a potential use-after-free.
+     * last thing we do so as to avoid a potential use-after-free.
      */
     if ( l1e_get_flags(intermediate_entry) & _PAGE_PRESENT )
         p2m_free_entry(p2m, &intermediate_entry, page_order);
At this moment IOMMU pt sharing is disabled by commit [1].

This patch aims to clear the IOMMU hap share support as it will not be
used in the future. By doing this the IOMMU bits used in pte[52:58] can
be used in other ways.

[1] c2ba3db31ef2d9f1e40e7b6c16cf3be3d671d555

Suggested-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
---
Changes since V1:
 - Rework commit message
 - Reflow comments
 - Move flags init to declaration in p2m_type_to_flags.
---
 xen/arch/x86/mm/p2m-pt.c | 96 +++-------------------------------------
 1 file changed, 5 insertions(+), 91 deletions(-)
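[Editor's note] To make it easier to see which PTE bits the patch frees up, here is a minimal standalone C sketch, not Xen code: the helper names encode_p2m_type/decode_p2m_type are invented for illustration, and it assumes Xen's 24-bit pte-flags convention in which flag bits 0-11 map to PTE bits 0-11 and flag bits 12-23 map to PTE bits 52-63. Under that assumption, the (t & 0x7f) << 12 encoding kept by the patch stores the p2m type in PTE bits 52-58, while the removed iommu_nlevel_to_flags() encoding occupied flag bits 9-11 (next io page level) and 21-22 (IOMMU r/w permissions).

/*
 * Standalone sketch of the two encodings the patch touches; helper
 * names are hypothetical, only the shift/mask values come from the
 * patch itself.
 */
#include <stdio.h>

/* Kept by the patch: p2m type in flag bits 12-18 (PTE bits 52-58). */
static unsigned long encode_p2m_type(unsigned int t)
{
    return (unsigned long)(t & 0x7f) << 12;
}

static unsigned int decode_p2m_type(unsigned long flags)
{
    return (flags >> 12) & 0x7f;
}

/*
 * Removed by the patch: IOMMU next-level in flag bits 9-11 and r/w
 * permissions in flag bits 21-22, needed only while the p2m tables
 * were shared with the AMD IOMMU.
 */
#define iommu_nlevel_to_flags(nl, f) ((((nl) & 0x7) << 9) | (((f) & 0x3) << 21))

int main(void)
{
    unsigned long flags = encode_p2m_type(0x2a); /* arbitrary type value */

    printf("p2m flags %#lx decode back to type %#x\n",
           flags, decode_p2m_type(flags));
    printf("old iommu bits for next-level 2, r/w: %#x\n",
           iommu_nlevel_to_flags(2, 3));
    return 0;
}

With the IOMMU encoding gone these flag bits carry no hardware meaning in the p2m tables any more, which is the reuse opportunity the commit message points at.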