From b8ec8e14c670215587a4e74c3aaec8dab6f26a8c Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dunlap@eu.citrix.com>
Date: Tue, 22 Dec 2015 16:17:22 +0000
Subject: [PATCH] xen/mm: Clean up pfec handling in gva_to_gfn

At the moment, the pfec argument to gva_to_gfn has two functions:

* To inform guest_walk what kind of access is happening

* As a value to pass back to the guest in the event of a fault.

Unfortunately this is not quite treated consistently: the hvm_fetch_*
functions pass PFEC_insn_fetch to gva_to_gfn only when NX or SMEP is
enabled (in effect "pre-clearing" the flag otherwise), which means
guest_walk doesn't actually know whether a given access is an
instruction fetch or not.  This works now, but will cause issues when
pkeys are introduced, since guest_walk will need to know whether an
access is an instruction fetch even if it doesn't return
PFEC_insn_fetch.
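
For illustration only, not part of this patch: protection keys restrict
data accesses but not instruction fetches, so a future check in the
walker would need the true access type along roughly these lines;
pkey_fault() is a placeholder name for that future check, and the
variable names are hypothetical.

    /*
     * Hypothetical sketch of a future pkey check in the guest walker.
     * Because pkeys apply only to data accesses, the walker must see
     * the real access type in the incoming pfec, even though
     * PFEC_insn_fetch may later be filtered out of the value reported
     * to the guest.
     */
    if ( !(pfec & PFEC_insn_fetch) &&          /* data access, not a fetch */
         pkey_fault(v, pfec, gflags, pkey) )   /* placeholder helper */
        goto out;                              /* fail the walk */
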
Fix this by making a clean separation between the input and output
roles of the pfec argument:

1. Always pass the real access type in to gva_to_gfn.

2. Filter out inappropriate access flags before returning from
   gva_to_gfn (a sketch of the resulting contract follows).

(The PFEC_insn_fetch flag should only be passed to the guest if either
NX or SMEP is enabled.  See the Intel 64 and IA-32 Architectures
Software Developer's Manual, Volume 3, Section 4.7.)
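
As a rough sketch of the resulting contract at a caller (simplified;
the actual changes are in the hunks below, and gva_to_gfn is normally
reached via the paging_gva_to_gfn() wrapper):

    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch; /* real access type in */
    unsigned long gfn = paging_gva_to_gfn(v, vaddr, &pfec);

    if ( gfn == INVALID_GFN )
        /*
         * On failure, pfec now holds only flags suitable to report to
         * the guest: gva_to_gfn has already dropped PFEC_insn_fetch
         * when neither NX nor SMEP is enabled.
         */
        hvm_inject_page_fault(pfec, vaddr);
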
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
---
 xen/arch/x86/hvm/hvm.c           |  8 ++------
 xen/arch/x86/mm/hap/guest_walk.c | 10 +++++++++-
 xen/arch/x86/mm/shadow/multi.c   |  6 ++++++
 3 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4423,11 +4423,9 @@ enum hvm_copy_result hvm_copy_from_guest_virt(
 enum hvm_copy_result hvm_fetch_from_guest_virt(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
-        pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
-                      PFEC_page_present | pfec);
+                      PFEC_page_present | PFEC_insn_fetch | pfec);
 }
 
 enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
@@ -4449,11 +4447,9 @@ enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) || hvm_smep_enabled(current) )
-        pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
-                      PFEC_page_present | pfec);
+                      PFEC_page_present | PFEC_insn_fetch | pfec);
 }
 
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -82,7 +82,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
     if ( !top_page )
     {
         pfec[0] &= ~PFEC_page_present;
-        return INVALID_GFN;
+        goto out_tweak_pfec;
     }
 
     top_mfn = _mfn(page_to_mfn(top_page));
@@ -136,6 +136,14 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
     if ( missing & _PAGE_SHARED )
         pfec[0] = PFEC_page_shared;
 
+ out_tweak_pfec:
+    /*
+     * Intel 64 Volume 3, Section 4.7: The PFEC_insn_fetch flag is set
+     * only when NX or SMEP are enabled.
+     */
+    if ( !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
+        pfec[0] &= ~PFEC_insn_fetch;
+
     return INVALID_GFN;
 }
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3668,6 +3668,12 @@ sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
             pfec[0] &= ~PFEC_page_present;
         if ( missing & _PAGE_INVALID_BITS )
             pfec[0] |= PFEC_reserved_bit;
+        /*
+         * Intel 64 Volume 3, Section 4.7: The PFEC_insn_fetch flag is
+         * set only when NX or SMEP are enabled.
+         */
+        if ( !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
+            pfec[0] &= ~PFEC_insn_fetch;
         return INVALID_GFN;
     }
     gfn = guest_walk_to_gfn(&gw);
--
2.1.4