Add pv prefix to it. Move it to pv/mm.c. Fix call sites. Take the
chance to change v to curr and d to currd.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/mm.c           | 73 -------------------------------------------
 xen/arch/x86/pv/mm.c        | 75 ++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/traps.c        |  4 +--
 xen/include/asm-x86/mm.h    |  2 --
 xen/include/asm-x86/pv/mm.h |  4 +++
 5 files changed, 81 insertions(+), 77 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -526,27 +526,6 @@ void update_cr3(struct vcpu *v)
     make_cr3(v, cr3_mfn);
 }
 
-/*
- * Read the guest's l1e that maps this address, from the kernel-mode
- * page tables.
- */
-static l1_pgentry_t guest_get_eff_kern_l1e(unsigned long linear)
-{
-    struct vcpu *curr = current;
-    const bool user_mode = !(curr->arch.flags & TF_kernel_mode);
-    l1_pgentry_t l1e;
-
-    if ( user_mode )
-        toggle_guest_mode(curr);
-
-    l1e = guest_get_eff_l1e(linear);
-
-    if ( user_mode )
-        toggle_guest_mode(curr);
-
-    return l1e;
-}
-
 static inline void page_set_tlbflush_timestamp(struct page_info *page)
 {
     /*
@@ -615,58 +594,6 @@ static int alloc_segdesc_page(struct page_info *page)
 
     return i == 512 ? 0 : -EINVAL;
 }
-
-/*
- * Map a guest's LDT page (covering the byte at @offset from start of the LDT)
- * into Xen's virtual range. Returns true if the mapping changed, false
- * otherwise.
- */
-bool map_ldt_shadow_page(unsigned int offset)
-{
-    struct vcpu *v = current;
-    struct domain *d = v->domain;
-    struct page_info *page;
-    l1_pgentry_t gl1e, *pl1e;
-    unsigned long linear = v->arch.pv_vcpu.ldt_base + offset;
-
-    BUG_ON(unlikely(in_irq()));
-
-    /*
-     * Hardware limit checking should guarantee this property. NB. This is
-     * safe as updates to the LDT can only be made by MMUEXT_SET_LDT to the
-     * current vcpu, and vcpu_reset() will block until this vcpu has been
-     * descheduled before continuing.
-     */
-    ASSERT((offset >> 3) <= v->arch.pv_vcpu.ldt_ents);
-
-    if ( is_pv_32bit_domain(d) )
-        linear = (uint32_t)linear;
-
-    gl1e = guest_get_eff_kern_l1e(linear);
-    if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
-        return false;
-
-    page = get_page_from_gfn(d, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
-    if ( unlikely(!page) )
-        return false;
-
-    if ( unlikely(!get_page_type(page, PGT_seg_desc_page)) )
-    {
-        put_page(page);
-        return false;
-    }
-
-    pl1e = &pv_ldt_ptes(v)[offset >> PAGE_SHIFT];
-    l1e_add_flags(gl1e, _PAGE_RW);
-
-    spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
-    l1e_write(pl1e, gl1e);
-    v->arch.pv_vcpu.shadow_ldt_mapcnt++;
-    spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
-
-    return true;
-}
-
 static int get_page_and_type_from_mfn(
     mfn_t mfn, unsigned long type, struct domain *d,
     int partial, int preemptible)
diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c
--- a/xen/arch/x86/pv/mm.c
+++ b/xen/arch/x86/pv/mm.c
@@ -22,6 +22,9 @@
 #include <xen/guest_access.h>
 
 #include <asm/current.h>
+#include <asm/p2m.h>
+
+#include "mm.h"
 
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef mfn_to_page
@@ -58,6 +61,78 @@ l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn)
     return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(linear);
 }
 
+/*
+ * Read the guest's l1e that maps this address, from the kernel-mode
+ * page tables.
+ */
+static l1_pgentry_t guest_get_eff_kern_l1e(unsigned long linear)
+{
+    struct vcpu *curr = current;
+    const bool user_mode = !(curr->arch.flags & TF_kernel_mode);
+    l1_pgentry_t l1e;
+
+    if ( user_mode )
+        toggle_guest_mode(curr);
+
+    l1e = guest_get_eff_l1e(linear);
+
+    if ( user_mode )
+        toggle_guest_mode(curr);
+
+    return l1e;
+}
+
+/*
+ * Map a guest's LDT page (covering the byte at @offset from start of the LDT)
+ * into Xen's virtual range. Returns true if the mapping changed, false
+ * otherwise.
+ */
+bool pv_map_ldt_shadow_page(unsigned int offset)
+{
+    struct vcpu *curr = current;
+    struct domain *currd = curr->domain;
+    struct page_info *page;
+    l1_pgentry_t gl1e, *pl1e;
+    unsigned long linear = curr->arch.pv_vcpu.ldt_base + offset;
+
+    BUG_ON(unlikely(in_irq()));
+
+    /*
+     * Hardware limit checking should guarantee this property. NB. This is
+     * safe as updates to the LDT can only be made by MMUEXT_SET_LDT to the
+     * current vcpu, and vcpu_reset() will block until this vcpu has been
+     * descheduled before continuing.
+     */
+    ASSERT((offset >> 3) <= curr->arch.pv_vcpu.ldt_ents);
+
+    if ( is_pv_32bit_domain(currd) )
+        linear = (uint32_t)linear;
+
+    gl1e = guest_get_eff_kern_l1e(linear);
+    if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
+        return false;
+
+    page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
+    if ( unlikely(!page) )
+        return false;
+
+    if ( unlikely(!get_page_type(page, PGT_seg_desc_page)) )
+    {
+        put_page(page);
+        return false;
+    }
+
+    pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT];
+    l1e_add_flags(gl1e, _PAGE_RW);
+
+    spin_lock(&curr->arch.pv_vcpu.shadow_ldt_lock);
+    l1e_write(pl1e, gl1e);
+    curr->arch.pv_vcpu.shadow_ldt_mapcnt++;
+    spin_unlock(&curr->arch.pv_vcpu.shadow_ldt_lock);
+
+    return true;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1101,7 +1101,7 @@ static int handle_gdt_ldt_mapping_fault(unsigned long offset,
     /*
      * If the fault is in another vcpu's area, it cannot be due to
      * a GDT/LDT descriptor load. Thus we can reasonably exit immediately, and
-     * indeed we have to since map_ldt_shadow_page() works correctly only on
+     * indeed we have to since pv_map_ldt_shadow_page() works correctly only on
      * accesses to a vcpu's own area.
      */
     if ( vcpu_area != curr->vcpu_id )
@@ -1113,7 +1113,7 @@ static int handle_gdt_ldt_mapping_fault(unsigned long offset,
     if ( likely(is_ldt_area) )
     {
         /* LDT fault: Copy a mapping from the guest's LDT, if it is valid. */
-        if ( likely(map_ldt_shadow_page(offset)) )
+        if ( likely(pv_map_ldt_shadow_page(offset)) )
         {
             if ( guest_mode(regs) )
                 trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -562,8 +562,6 @@ long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
 int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
 int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
 
-bool map_ldt_shadow_page(unsigned int);
-
 #define NIL(type) ((type *)-sizeof(type))
 #define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr))))
 
diff --git a/xen/include/asm-x86/pv/mm.h b/xen/include/asm-x86/pv/mm.h
--- a/xen/include/asm-x86/pv/mm.h
+++ b/xen/include/asm-x86/pv/mm.h
@@ -28,6 +28,8 @@ int pv_ro_page_fault(unsigned long addr, struct cpu_user_regs *regs);
 long pv_set_gdt(struct vcpu *d, unsigned long *frames, unsigned int entries);
 void pv_destroy_gdt(struct vcpu *d);
 
+bool pv_map_ldt_shadow_page(unsigned int off);
+
 #else
 
 #include <xen/errno.h>
@@ -43,6 +45,8 @@ static inline long pv_set_gdt(struct vcpu *d, unsigned long *frames,
 { return -EINVAL; }
 static inline void pv_destroy_gdt(struct vcpu *d) {}
 
+static inline bool pv_map_ldt_shadow_page(unsigned int off) { return false; }
+
 #endif
 
 #endif /* __X86_PV_MM_H__ */