[PATCHv2] x86/xen: avoid m2p lookup when setting early page table entries

Message ID 1466525353-27751-1-git-send-email-david.vrabel@citrix.com (mailing list archive)
State New, archived

Commit Message

David Vrabel June 21, 2016, 4:09 p.m. UTC
When page table entries are set using xen_set_pte_init() during early
boot there is no page fault handler that could handle a fault when
performing an M2P lookup.

In a 64-bit guest (usually dom0), early_ioremap() would fault in
xen_set_pte_init() because the M2P lookup itself faults: the MFN is in
MMIO space and not mapped in the M2P.  This lookup is done to see if
the PFN is in the range used for the initial page table pages, so that
the PTE may be set as read-only.

The M2P lookup can be avoided by moving the check (and the clearing of
_PAGE_RW) earlier, while the PFN is still available.
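
For illustration, the lookup that faults, in simplified form (a
paraphrase of the PV mfn_to_pfn() path, not the exact kernel code):

	unsigned long mfn_to_pfn(unsigned long mfn)
	{
		/*
		 * machine_to_phys_mapping is a hypervisor-provided
		 * array; entries for MMIO MFNs are simply not mapped,
		 * so this read faults -- fatal while no page fault
		 * handler is installed.
		 */
		return machine_to_phys_mapping[mfn];
	}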

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Tested-by: Keven Moraga <kmoragas@riseup.net>
---
v2:

- Remove __init annotation from xen_make_pte_init() since
  PV_CALLEE_SAVE_REGS_THUNK always puts the thunk in .text.

- mask_rw_pte() -> mask_rw_pteval() for x86-64.
---
 arch/x86/xen/mmu.c | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

Comments

Boris Ostrovsky June 21, 2016, 7:31 p.m. UTC | #1
On 06/21/2016 12:09 PM, David Vrabel wrote:
[...]
> ---
> v2:
>
> - Remove __init annotation from xen_make_pte_init() since
>   PV_CALLEE_SAVE_REGS_THUNK always puts the thunk in .text.
>
> - mask_rw_pte() -> mask_rw_pteval() for x86-64.

I don't think you actually renamed the routine.

> ---
>  arch/x86/xen/mmu.c | 28 +++++++++++++++++++++-------
>  1 file changed, 21 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
> index 478a2de..e47bc19 100644
> --- a/arch/x86/xen/mmu.c
> +++ b/arch/x86/xen/mmu.c
> @@ -1562,7 +1562,7 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
>  	return pte;
>  }
>  #else /* CONFIG_X86_64 */
> -static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
> +static pteval_t __init mask_rw_pte(pteval_t pte)
>  {
>  	unsigned long pfn;
>  
> @@ -1577,10 +1577,10 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
>  	 * page tables for mapping the p2m list, too, and page tables MUST be
>  	 * mapped read-only.
>  	 */
> -	pfn = pte_pfn(pte);
> +	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;

Is it obvious that we are holding a valid PFN at this point? It wasn't
immediately obvious to me, so I wonder whether a comment stating this
would be useful here (yes, I know you mention it in the commit message).
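
Something along these lines, say (just a sketch of the kind of comment
I mean, exact wording up to you):

	/*
	 * 'pte' has not been run through pte_pfn_to_mfn() yet, so it
	 * still carries a PFN here and no M2P lookup is needed.
	 */
	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;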

-boris


Patch

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 478a2de..e47bc19 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1562,7 +1562,7 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 	return pte;
 }
 #else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
+static pteval_t __init mask_rw_pte(pteval_t pte)
 {
 	unsigned long pfn;
 
@@ -1577,10 +1577,10 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 	 * page tables for mapping the p2m list, too, and page tables MUST be
 	 * mapped read-only.
 	 */
-	pfn = pte_pfn(pte);
+	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
 	if (pfn >= xen_start_info->first_p2m_pfn &&
 	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
-		pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
+		pte &= ~_PAGE_RW;
 
 	return pte;
 }
@@ -1600,13 +1600,26 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
  * so always write the PTE directly and rely on Xen trapping and
  * emulating any updates as necessary.
  */
+__visible pte_t xen_make_pte_init(pteval_t pte)
+{
+#ifdef CONFIG_X86_64
+	pte = mask_rw_pte(pte);
+#endif
+	pte = pte_pfn_to_mfn(pte);
+
+	if ((pte & PTE_PFN_MASK) >> PAGE_SHIFT == INVALID_P2M_ENTRY)
+		pte = 0;
+
+	return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
+
 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
 {
+#ifdef CONFIG_X86_32
 	if (pte_mfn(pte) != INVALID_P2M_ENTRY)
 		pte = mask_rw_pte(ptep, pte);
-	else
-		pte = __pte_ma(0);
-
+#endif
 	native_set_pte(ptep, pte);
 }
 
@@ -2407,6 +2420,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
+	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
 #ifdef CONFIG_X86_64
 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2469,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
 	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
 
-	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
+	.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
 	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
 #ifdef CONFIG_X86_PAE
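
For anyone who wants to poke at the new check outside the kernel tree,
below is a minimal, self-contained userspace model of it. The constants
mirror their x86-64 values but are restated here as assumptions, not
taken from kernel headers, and the p2m range is made up for the example:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT   12
	#define PTE_PFN_MASK 0x000ffffffffff000ULL	/* PFN bits 12..51 */
	#define _PAGE_RW     (1ULL << 1)

	typedef uint64_t pteval_t;

	/* Clear _PAGE_RW on PTEs whose PFN falls in the p2m-list range. */
	static pteval_t mask_rw_pte(pteval_t pte, uint64_t first_p2m_pfn,
				    uint64_t nr_p2m_frames)
	{
		uint64_t pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;

		if (pfn >= first_p2m_pfn &&
		    pfn < first_p2m_pfn + nr_p2m_frames)
			pte &= ~_PAGE_RW;
		return pte;
	}

	int main(void)
	{
		/* A present, writable PTE pointing at PFN 0x1234. */
		pteval_t pte = (0x1234ULL << PAGE_SHIFT) | _PAGE_RW | 1;

		pte = mask_rw_pte(pte, 0x1000, 0x1000);
		printf("RW cleared: %s\n", (pte & _PAGE_RW) ? "no" : "yes");
		return 0;
	}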