Move invalidate_shadow_ldt to pv/mm.c and export it via pv/mm.h as
pv_invalidate_shadow_ldt. Use bool for the flush parameter.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/mm.c           | 44 ++++----------------------------------------
 xen/arch/x86/pv/mm.c        | 35 +++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/pv/mm.h |  4 ++++
 3 files changed, 43 insertions(+), 40 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -474,42 +474,6 @@ static inline void page_set_tlbflush_timestamp(struct page_info *page)
 const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE)
     zero_page[PAGE_SIZE];
 
-static void invalidate_shadow_ldt(struct vcpu *v, int flush)
-{
-    l1_pgentry_t *pl1e;
-    unsigned int i;
-    struct page_info *page;
-
-    BUG_ON(unlikely(in_irq()));
-
-    spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
-
-    if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 )
-        goto out;
-
-    v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
-    pl1e = gdt_ldt_ptes(v->domain, v);
-
-    for ( i = 16; i < 32; i++ )
-    {
-        if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) )
-            continue;
-        page = l1e_get_page(pl1e[i]);
-        l1e_write(&pl1e[i], l1e_empty());
-        ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
-        ASSERT_PAGE_IS_DOMAIN(page, v->domain);
-        put_page_and_type(page);
-    }
-
-    /* Rid TLBs of stale mappings (guest mappings and shadow mappings). */
-    if ( flush )
-        flush_tlb_mask(v->vcpu_dirty_cpumask);
-
- out:
-    spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
-}
-
-
 bool get_page_from_mfn(mfn_t mfn, struct domain *d)
 {
     struct page_info *page = mfn_to_page(mfn_x(mfn));
@@ -1055,7 +1019,7 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
              (l1e_owner == pg_owner) )
         {
             for_each_vcpu ( pg_owner, v )
-                invalidate_shadow_ldt(v, 1);
+                pv_invalidate_shadow_ldt(v, true);
         }
         put_page(page);
     }
@@ -1954,7 +1918,7 @@ int pv_new_guest_cr3(unsigned long mfn)
             return rc;
         }
 
-        invalidate_shadow_ldt(curr, 0);
+        pv_invalidate_shadow_ldt(curr, false);
         write_ptbase(curr);
 
         return 0;
@@ -1992,7 +1956,7 @@ int pv_new_guest_cr3(unsigned long mfn)
         return rc;
     }
 
-    invalidate_shadow_ldt(curr, 0);
+    pv_invalidate_shadow_ldt(curr, false);
 
     if ( !VM_ASSIST(currd, m2p_strict) && !paging_mode_refcounts(currd) )
         fill_ro_mpt(mfn);
@@ -2492,7 +2456,7 @@ long do_mmuext_op(
         else if ( (curr->arch.pv_vcpu.ldt_ents != ents) ||
                   (curr->arch.pv_vcpu.ldt_base != ptr) )
         {
-            invalidate_shadow_ldt(curr, 0);
+            pv_invalidate_shadow_ldt(curr, false);
             flush_tlb_local();
             curr->arch.pv_vcpu.ldt_base = ptr;
             curr->arch.pv_vcpu.ldt_ents = ents;
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -742,6 +742,41 @@ int pv_free_page_type(struct page_info *page, unsigned long type,
     return rc;
 }
 
+void pv_invalidate_shadow_ldt(struct vcpu *v, bool flush)
+{
+    l1_pgentry_t *pl1e;
+    unsigned int i;
+    struct page_info *page;
+
+    BUG_ON(unlikely(in_irq()));
+
+    spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
+
+    if ( v->arch.pv_vcpu.shadow_ldt_mapcnt == 0 )
+        goto out;
+
+    v->arch.pv_vcpu.shadow_ldt_mapcnt = 0;
+    pl1e = gdt_ldt_ptes(v->domain, v);
+
+    for ( i = 16; i < 32; i++ )
+    {
+        if ( !(l1e_get_flags(pl1e[i]) & _PAGE_PRESENT) )
+            continue;
+        page = l1e_get_page(pl1e[i]);
+        l1e_write(&pl1e[i], l1e_empty());
+        ASSERT_PAGE_IS_TYPE(page, PGT_seg_desc_page);
+        ASSERT_PAGE_IS_DOMAIN(page, v->domain);
+        put_page_and_type(page);
+    }
+
+    /* Rid TLBs of stale mappings (guest mappings and shadow mappings). */
+    if ( flush )
+        flush_tlb_mask(v->vcpu_dirty_cpumask);
+
+ out:
+    spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h
--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -100,6 +100,8 @@ int pv_alloc_page_type(struct page_info *page, unsigned long type,
 int pv_free_page_type(struct page_info *page, unsigned long type,
                       bool preemptible);
 
+void pv_invalidate_shadow_ldt(struct vcpu *v, bool flush);
+
 #else
 
 #include <xen/errno.h>
@@ -139,6 +141,8 @@ static inline int pv_free_page_type(struct page_info *page, unsigned long type,
                                     bool preemptible)
 { BUG(); return -EINVAL; }
 
+static inline void pv_invalidate_shadow_ldt(struct vcpu *v, bool flush) {}
+
 #endif
 
 #endif /* __X86_PV_MM_H__ */
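A side note on the pv/mm.h arrangement: pairing the real declaration with an
empty static inline for !CONFIG_PV builds lets common code call the helper
without an #ifdef CONFIG_PV at every call site. Below is a minimal,
standalone sketch of that idiom; all names in it (CONFIG_FOO, foo_invalidate,
the toy struct vcpu) are hypothetical illustrations, not Xen code.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical feature switch standing in for CONFIG_PV; comment it out
 * to exercise the stub path instead.
 */
#define CONFIG_FOO 1

struct vcpu { int id; };    /* toy stand-in, not Xen's struct vcpu */

#ifdef CONFIG_FOO
/* "Real" implementation, built only when the feature is enabled. */
static void foo_invalidate(struct vcpu *v, bool flush)
{
    printf("invalidate vcpu %d%s\n", v->id, flush ? ", then flush TLBs" : "");
}
#else
/* Empty stub: calls compile away when the feature is disabled. */
static inline void foo_invalidate(struct vcpu *v, bool flush)
{
    (void)v;
    (void)flush;
}
#endif

int main(void)
{
    struct vcpu v = { .id = 0 };

    /* No #ifdef needed at the call site, whichever way CONFIG_FOO is set. */
    foo_invalidate(&v, true);
    return 0;
}

The switch to bool has a similar readability benefit at call sites:
pv_invalidate_shadow_ldt(curr, false) is self-documenting in a way the old
invalidate_shadow_ldt(curr, 0) was not.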