
[RFC,75/84] x86/mm: handle PSE early termination cases in virt_to_mfn_walk().

Message ID: 5e135be6b8754ddfa24560eec8c5d1254dba34ae.1569489002.git.hongyax@amazon.com (mailing list archive)
State: New, archived
Series: Remove direct map from Xen

Commit Message

Xia, Hongyan Sept. 26, 2019, 9:46 a.m. UTC
From: Hongyan Xia <hongyax@amazon.com>

Signed-off-by: Hongyan Xia <hongyax@amazon.com>
---
 xen/arch/x86/mm.c | 36 ++++++++++++++++++++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)
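
The "PSE early termination" in the subject refers to superpage mappings: when an L3 or L2 entry has _PAGE_PSE set, the walk stops at that level, and the MFN backing va is the superpage's base frame plus the 4KiB-page index of va within the superpage. Roughly, for the 2MiB (L2) case the patch handles:

    /*
     * For a 2MiB superpage, the low L2_PAGETABLE_SHIFT (21) bits of va locate
     * the byte within the superpage; shifting right by PAGE_SHIFT (12) turns
     * that into a 4KiB-page index (0..511), which is OR'd into the superpage's
     * base frame number (2MiB-aligned, so | behaves like +).
     */
    ret = l2e_get_pfn(*pl2e) |
          (((unsigned long)va & ((1UL << L2_PAGETABLE_SHIFT) - 1)) >> PAGE_SHIFT);

The 1GiB (L3) case is the same with L3_PAGETABLE_SHIFT (30).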

Comments

Wei Liu Sept. 26, 2019, 2:39 p.m. UTC | #1
On Thu, Sep 26, 2019 at 10:46:38AM +0100, hongyax@amazon.com wrote:
> From: Hongyan Xia <hongyax@amazon.com>
> 
> Signed-off-by: Hongyan Xia <hongyax@amazon.com>
> ---
>  xen/arch/x86/mm.c | 36 ++++++++++++++++++++++++++++++++++--
>  1 file changed, 34 insertions(+), 2 deletions(-)
> 
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index ab760ecc1f..39ba9f9bf4 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5058,8 +5058,40 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
>  
>  unsigned long virt_to_mfn_walk(void *va)
>  {
> -    l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)(va));
> -    unsigned long ret = l1e_get_pfn(*pl1e);
> +    unsigned long ret;
> +    l3_pgentry_t *pl3e;
> +    l2_pgentry_t *pl2e;
> +    l1_pgentry_t *pl1e;
> +
> +    /*
> +     * FIXME: This is rather unoptimised, because e.g. virt_to_xen_l2e
> +     * recomputes virt_to_xen_l3e again. Clearly one can keep the result and
> +     * carry on.
> +     */
> +
> +    pl3e = virt_to_xen_l3e((unsigned long)(va));
> +    BUG_ON(!(l3e_get_flags(*pl3e) & _PAGE_PRESENT));
> +    if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
> +    {
> +        ret = l3e_get_pfn(*pl3e);
> +        ret |= (((unsigned long)va & ((1UL << L3_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
> +        unmap_xen_pagetable(pl3e);
> +        return ret;
> +    }
> +
> +    pl2e = virt_to_xen_l2e((unsigned long)(va));
> +    BUG_ON(!(l2e_get_flags(*pl2e) & _PAGE_PRESENT));
> +    if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
> +    {
> +        ret = l2e_get_pfn(*pl2e);
> +        ret |= (((unsigned long)va & ((1UL << L2_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
> +        unmap_xen_pagetable(pl2e);
> +        return ret;
> +    }
> +
> +    pl1e = virt_to_xen_l1e((unsigned long)(va));
> +    BUG_ON(!(l1e_get_flags(*pl1e) & _PAGE_PRESENT));
> +    ret = l1e_get_pfn(*pl1e);

Don't you end up leaking pl3e and pl2e in the !PSE case?

Also, if you only want to walk page tables that are already populated,
there may be a better way to do it than calling virt_to_xen_lXe.

Wei.

>      unmap_xen_pagetable(pl1e);
>      return ret;
>  }
> -- 
> 2.17.1
> 
> 
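
One way to plug the leak Wei points out, while keeping the structure of the patch, is to unmap each intermediate mapping as soon as the walk has moved past it. A minimal sketch of the non-PSE tail of virt_to_mfn_walk() under that change, using the same variables and helpers as the patch:

    pl2e = virt_to_xen_l2e((unsigned long)va);
    unmap_xen_pagetable(pl3e);            /* done with the L3 mapping */
    BUG_ON(!(l2e_get_flags(*pl2e) & _PAGE_PRESENT));
    if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
    {
        ret = l2e_get_pfn(*pl2e) |
              (((unsigned long)va & ((1UL << L2_PAGETABLE_SHIFT) - 1)) >> PAGE_SHIFT);
        unmap_xen_pagetable(pl2e);
        return ret;
    }

    pl1e = virt_to_xen_l1e((unsigned long)va);
    unmap_xen_pagetable(pl2e);            /* done with the L2 mapping */
    BUG_ON(!(l1e_get_flags(*pl1e) & _PAGE_PRESENT));
    ret = l1e_get_pfn(*pl1e);
    unmap_xen_pagetable(pl1e);
    return ret;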

Patch

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index ab760ecc1f..39ba9f9bf4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5058,8 +5058,40 @@  l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
 
 unsigned long virt_to_mfn_walk(void *va)
 {
-    l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)(va));
-    unsigned long ret = l1e_get_pfn(*pl1e);
+    unsigned long ret;
+    l3_pgentry_t *pl3e;
+    l2_pgentry_t *pl2e;
+    l1_pgentry_t *pl1e;
+
+    /*
+     * FIXME: This is rather unoptimised, because e.g. virt_to_xen_l2e
+     * recomputes virt_to_xen_l3e again. Clearly one can keep the result and
+     * carry on.
+     */
+
+    pl3e = virt_to_xen_l3e((unsigned long)(va));
+    BUG_ON(!(l3e_get_flags(*pl3e) & _PAGE_PRESENT));
+    if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
+    {
+        ret = l3e_get_pfn(*pl3e);
+        ret |= (((unsigned long)va & ((1UL << L3_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
+        unmap_xen_pagetable(pl3e);
+        return ret;
+    }
+
+    pl2e = virt_to_xen_l2e((unsigned long)(va));
+    BUG_ON(!(l2e_get_flags(*pl2e) & _PAGE_PRESENT));
+    if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
+    {
+        ret = l2e_get_pfn(*pl2e);
+        ret |= (((unsigned long)va & ((1UL << L2_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
+        unmap_xen_pagetable(pl2e);
+        return ret;
+    }
+
+    pl1e = virt_to_xen_l1e((unsigned long)(va));
+    BUG_ON(!(l1e_get_flags(*pl1e) & _PAGE_PRESENT));
+    ret = l1e_get_pfn(*pl1e);
     unmap_xen_pagetable(pl1e);
     return ret;
 }
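
On Wei's second point and on the FIXME in the patch: since virt_to_mfn_walk() only wants to read mappings that are already populated, one possible shape is a single top-down walk from idle_pg_table that maps each page-table page exactly once, instead of re-walking the upper levels through virt_to_xen_l3e/l2e/l1e (which may also allocate). A rough sketch, assuming a map_xen_pagetable() counterpart (taking an MFN, as introduced earlier in this series) to the unmap_xen_pagetable() already used by the patch:

    unsigned long virt_to_mfn_walk(void *va)
    {
        unsigned long v = (unsigned long)va;
        l4_pgentry_t l4e = idle_pg_table[l4_table_offset(v)];
        l3_pgentry_t *l3t, l3e;
        l2_pgentry_t *l2t, l2e;
        l1_pgentry_t *l1t, l1e;

        BUG_ON(!(l4e_get_flags(l4e) & _PAGE_PRESENT));

        /* Map the L3 table, read the entry for v, and drop the mapping. */
        l3t = map_xen_pagetable(l4e_get_mfn(l4e));
        l3e = l3t[l3_table_offset(v)];
        unmap_xen_pagetable(l3t);
        BUG_ON(!(l3e_get_flags(l3e) & _PAGE_PRESENT));
        if ( l3e_get_flags(l3e) & _PAGE_PSE )      /* 1GiB superpage */
            return l3e_get_pfn(l3e) |
                   ((v & ((1UL << L3_PAGETABLE_SHIFT) - 1)) >> PAGE_SHIFT);

        /* Same again for the L2 table. */
        l2t = map_xen_pagetable(l3e_get_mfn(l3e));
        l2e = l2t[l2_table_offset(v)];
        unmap_xen_pagetable(l2t);
        BUG_ON(!(l2e_get_flags(l2e) & _PAGE_PRESENT));
        if ( l2e_get_flags(l2e) & _PAGE_PSE )      /* 2MiB superpage */
            return l2e_get_pfn(l2e) |
                   ((v & ((1UL << L2_PAGETABLE_SHIFT) - 1)) >> PAGE_SHIFT);

        /* Finally the L1 table; the entry gives the 4KiB frame. */
        l1t = map_xen_pagetable(l2e_get_mfn(l2e));
        l1e = l1t[l1_table_offset(v)];
        unmap_xen_pagetable(l1t);
        BUG_ON(!(l1e_get_flags(l1e) & _PAGE_PRESENT));

        return l1e_get_pfn(l1e);
    }

This shape also sidesteps the FIXME about virt_to_xen_l2e() recomputing virt_to_xen_l3e(), since each level is looked up exactly once and nothing is left mapped on return.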