diff mbox series

[RFC,58/84] x86/mm: fix leaks in map_xen_pagetable.

Message ID 6d79e6301ff15af71b21c64d10760abb9775b626.1569489002.git.hongyax@amazon.com (mailing list archive)
State New, archived
Headers show
Series Remove direct map from Xen | expand

Commit Message

Xia, Hongyan Sept. 26, 2019, 9:46 a.m. UTC
From: Hongyan Xia <hongyax@amazon.com>

Not unmapping pages after map_xen_pagetable can leak the virtual address
space over time. Also this fix makes vmap_to_mfn non-trivial to be a
macro. There might be better options but move it into vmap.c for now.

Signed-off-by: Hongyan Xia <hongyax@amazon.com>
---
 xen/arch/x86/mm.c          |  5 +----
 xen/common/vmap.c          | 13 +++++++++++++
 xen/include/asm-arm/mm.h   |  2 --
 xen/include/asm-x86/page.h |  2 --
 xen/include/xen/vmap.h     |  3 +++
 5 files changed, 17 insertions(+), 8 deletions(-)

Comments

Julien Grall Sept. 26, 2019, 10:23 a.m. UTC | #1
Hi,

NIT: we don't usually add full stop at the end of the title. This 
applies for the rest of the series.

On 9/26/19 10:46 AM, hongyax@amazon.com wrote:
> From: Hongyan Xia <hongyax@amazon.com>
> 
> Not unmapping pages after map_xen_pagetable can leak the virtual address
> space over time. Also this fix makes vmap_to_mfn non-trivial to be a
> macro. There might be better options but move it into vmap.c for now.
> 
> Signed-off-by: Hongyan Xia <hongyax@amazon.com>
> ---
>   xen/arch/x86/mm.c          |  5 +----
>   xen/common/vmap.c          | 13 +++++++++++++
>   xen/include/asm-arm/mm.h   |  2 --
>   xen/include/asm-x86/page.h |  2 --
>   xen/include/xen/vmap.h     |  3 +++
>   5 files changed, 17 insertions(+), 8 deletions(-)
> 
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index b2b2edbed1..145c5ab47c 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5160,6 +5160,7 @@ int map_pages_to_xen(
>                                !(l2e_get_flags(ol2e) & _PAGE_PSE) )
>                               free_xen_pagetable(l2e_get_mfn(ol2e));
>                       }
> +                    UNMAP_XEN_PAGETABLE(l2t);
>                       free_xen_pagetable(l2t_mfn);
>                   }
>               }
> @@ -5225,7 +5226,6 @@ int map_pages_to_xen(
>                   l3e_write_atomic(pl3e,
>                                    l3e_from_mfn(l2t_mfn, __PAGE_HYPERVISOR));
>                   UNMAP_XEN_PAGETABLE(l2t);
> -                l2t = NULL;
>               }
>               if ( locking )
>                   spin_unlock(&map_pgdir_lock);
> @@ -5346,7 +5346,6 @@ int map_pages_to_xen(
>                       l2e_write_atomic(pl2e, l2e_from_mfn(l1t_mfn,
>                                                           __PAGE_HYPERVISOR));
>                       UNMAP_XEN_PAGETABLE(l1t);
> -                    l1t = NULL;
>                   }
>                   if ( locking )
>                       spin_unlock(&map_pgdir_lock);
> @@ -5589,7 +5588,6 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
>               {
>                   l3e_write_atomic(pl3e, l3e_from_mfn(mfn, __PAGE_HYPERVISOR));
>                   UNMAP_XEN_PAGETABLE(l2t);
> -                l2t = NULL;
>               }
>               if ( locking )
>                   spin_unlock(&map_pgdir_lock);
> @@ -5657,7 +5655,6 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
>                       l2e_write_atomic(pl2e, l2e_from_mfn(mfn,
>                                                           __PAGE_HYPERVISOR));
>                       UNMAP_XEN_PAGETABLE(l1t);
> -                    l1t = NULL;
>                   }
>                   if ( locking )
>                       spin_unlock(&map_pgdir_lock);
> diff --git a/xen/common/vmap.c b/xen/common/vmap.c
> index faebc1ddf1..fcdb8495c8 100644
> --- a/xen/common/vmap.c
> +++ b/xen/common/vmap.c
> @@ -19,6 +19,19 @@ static unsigned int __read_mostly vm_end[VMAP_REGION_NR];
>   /* lowest known clear bit in the bitmap */
>   static unsigned int vm_low[VMAP_REGION_NR];
>   
> +mfn_t vmap_to_mfn(void *va)
> +{
> +    l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)(va));
> +    mfn_t ret = _mfn(l1e_get_pfn(*pl1e));

We already

> +    unmap_xen_pagetable(pl1e);

My knowledge of the x86 port is quite limited, but this feels suspicious 
to see an unmap in a function called vmap_to_mfn(). Where is the map 
done?

Furthermore, this is not going to compile on Arm. You probably want to 
implement this function in x86 code and keep the current implementation 
for Arm.

> +    return ret;
> +}
> +
> +struct page_info *vmap_to_page(void *va)
> +{
> +    return mfn_to_page(vmap_to_mfn(va));
> +}

This is the exact same implementation as in mm.h. So what's the 
advantage of this?

> +
>   void __init vm_init_type(enum vmap_region type, void *start, void *end)
>   {
>       unsigned int i, nr;
> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index 262d92f18d..1b53429255 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -231,8 +231,6 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
>   #define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
>   #define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
>   #define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
> -#define vmap_to_mfn(va)     maddr_to_mfn(virt_to_maddr((vaddr_t)va))
> -#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
>   
>   /* Page-align address and convert to frame number format */
>   #define paddr_to_pfn_aligned(paddr)    paddr_to_pfn(PAGE_ALIGN(paddr))
> diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
> index 906ec701a3..191de86bff 100644
> --- a/xen/include/asm-x86/page.h
> +++ b/xen/include/asm-x86/page.h
> @@ -266,8 +266,6 @@ void copy_page_sse2(void *, const void *);
>   #define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
>   #define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
>   #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
> -#define vmap_to_mfn(va)     _mfn(l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va))))
> -#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
>   
>   #endif /* !defined(__ASSEMBLY__) */
>   
> diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h
> index 369560e620..3d69727a9d 100644
> --- a/xen/include/xen/vmap.h
> +++ b/xen/include/xen/vmap.h
> @@ -23,6 +23,9 @@ void *vmalloc_xen(size_t size);
>   void *vzalloc(size_t size);
>   void vfree(void *va);
>   
> +mfn_t vmap_to_mfn(void *va);
> +struct page_info *vmap_to_page(void *va);
> +
>   void __iomem *ioremap(paddr_t, size_t);
>   
>   static inline void iounmap(void __iomem *va)
> 

Cheers,
Xia, Hongyan Sept. 26, 2019, 10:45 a.m. UTC | #2
On 26/09/2019 11:23, Julien Grall wrote:
> Hi,
> 
> NIT: we don't usually add full stop at the end of the title. This applies for 
> the rest of the series.

Thanks.

> 
> On 9/26/19 10:46 AM, hongyax@amazon.com wrote:
>> From: Hongyan Xia <hongyax@amazon.com>
>>
>> Not unmapping pages after map_xen_pagetable can leak the virtual address
>> space over time. Also this fix makes vmap_to_mfn non-trivial to be a
>> macro. There might be better options but move it into vmap.c for now.
>>
>> Signed-off-by: Hongyan Xia <hongyax@amazon.com>
>> ---
>>   xen/arch/x86/mm.c          |  5 +----
>>   xen/common/vmap.c          | 13 +++++++++++++
>>   xen/include/asm-arm/mm.h   |  2 --
>>   xen/include/asm-x86/page.h |  2 --
>>   xen/include/xen/vmap.h     |  3 +++
>>   5 files changed, 17 insertions(+), 8 deletions(-)
>>
>> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
>> index b2b2edbed1..145c5ab47c 100644
>> --- a/xen/arch/x86/mm.c
>> +++ b/xen/arch/x86/mm.c
>> @@ -5160,6 +5160,7 @@ int map_pages_to_xen(
>>                                !(l2e_get_flags(ol2e) & _PAGE_PSE) )
>>                               free_xen_pagetable(l2e_get_mfn(ol2e));
>>                       }
>> +                    UNMAP_XEN_PAGETABLE(l2t);
>>                       free_xen_pagetable(l2t_mfn);
>>                   }
>>               }
>> @@ -5225,7 +5226,6 @@ int map_pages_to_xen(
>>                   l3e_write_atomic(pl3e,
>>                                    l3e_from_mfn(l2t_mfn, __PAGE_HYPERVISOR));
>>                   UNMAP_XEN_PAGETABLE(l2t);
>> -                l2t = NULL;
>>               }
>>               if ( locking )
>>                   spin_unlock(&map_pgdir_lock);
>> @@ -5346,7 +5346,6 @@ int map_pages_to_xen(
>>                       l2e_write_atomic(pl2e, l2e_from_mfn(l1t_mfn,
>>                                                           __PAGE_HYPERVISOR));
>>                       UNMAP_XEN_PAGETABLE(l1t);
>> -                    l1t = NULL;
>>                   }
>>                   if ( locking )
>>                       spin_unlock(&map_pgdir_lock);
>> @@ -5589,7 +5588,6 @@ int modify_xen_mappings(unsigned long s, unsigned long 
>> e, unsigned int nf)
>>               {
>>                   l3e_write_atomic(pl3e, l3e_from_mfn(mfn, __PAGE_HYPERVISOR));
>>                   UNMAP_XEN_PAGETABLE(l2t);
>> -                l2t = NULL;
>>               }
>>               if ( locking )
>>                   spin_unlock(&map_pgdir_lock);
>> @@ -5657,7 +5655,6 @@ int modify_xen_mappings(unsigned long s, unsigned long 
>> e, unsigned int nf)
>>                       l2e_write_atomic(pl2e, l2e_from_mfn(mfn,
>>                                                           __PAGE_HYPERVISOR));
>>                       UNMAP_XEN_PAGETABLE(l1t);
>> -                    l1t = NULL;
>>                   }
>>                   if ( locking )
>>                       spin_unlock(&map_pgdir_lock);
>> diff --git a/xen/common/vmap.c b/xen/common/vmap.c
>> index faebc1ddf1..fcdb8495c8 100644
>> --- a/xen/common/vmap.c
>> +++ b/xen/common/vmap.c
>> @@ -19,6 +19,19 @@ static unsigned int __read_mostly vm_end[VMAP_REGION_NR];
>>   /* lowest known clear bit in the bitmap */
>>   static unsigned int vm_low[VMAP_REGION_NR];
>> +mfn_t vmap_to_mfn(void *va)
>> +{
>> +    l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)(va));
>> +    mfn_t ret = _mfn(l1e_get_pfn(*pl1e));
> 
> We already
> 
>> +    unmap_xen_pagetable(pl1e);
> 
> My knowledge of the x86 port is quite limited, but this feels suspicious to see 
> an unmap in a function called vmap_to_mfn(). Where is the map done?
> 
> Furthermore, this is not going to compile on Arm. You probably want to 
> implement this function in x86 code and keep the current implementation for Arm.
> 

Without the direct map, Xen page tables are accessed after mapping them into 
the address space with map_domain_page(), and unmapped when done. To read the 
l1e for the vmap, the page the l1e is in is first mapped, then the mfn is read, 
then the page is unmapped.

This series is based on 60 patches from Wei, so he might also be able to 
comment more on the details.

>> +    return ret;
>> +}
>> +
>> +struct page_info *vmap_to_page(void *va)
>> +{
>> +    return mfn_to_page(vmap_to_mfn(va));
>> +}
> 
> This is the exact same implementation as in mm.h. So what's the advantage of this?
> 

I agree. This can just be a macro.

Hongyan
Julien Grall Sept. 26, 2019, 12:05 p.m. UTC | #3
Hi,

On 9/26/19 11:45 AM, hongyax@amazon.com wrote:
> On 26/09/2019 11:23, Julien Grall wrote:
>> Hi,
>>
>> NIT: we don't usually add full stop at the end of the title. This 
>> applies for the rest of the series.
> 
> Thanks.
> 
>>
>> On 9/26/19 10:46 AM, hongyax@amazon.com wrote:
>>> From: Hongyan Xia <hongyax@amazon.com>
>>>
>>> Not unmapping pages after map_xen_pagetable can leak the virtual address
>>> space over time. Also this fix makes vmap_to_mfn non-trivial to be a
>>> macro. There might be better options but move it into vmap.c for now.
>>>
>>> Signed-off-by: Hongyan Xia <hongyax@amazon.com>
>>> ---
>>>   xen/arch/x86/mm.c          |  5 +----
>>>   xen/common/vmap.c          | 13 +++++++++++++
>>>   xen/include/asm-arm/mm.h   |  2 --
>>>   xen/include/asm-x86/page.h |  2 --
>>>   xen/include/xen/vmap.h     |  3 +++
>>>   5 files changed, 17 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
>>> index b2b2edbed1..145c5ab47c 100644
>>> --- a/xen/arch/x86/mm.c
>>> +++ b/xen/arch/x86/mm.c
>>> @@ -5160,6 +5160,7 @@ int map_pages_to_xen(
>>>                                !(l2e_get_flags(ol2e) & _PAGE_PSE) )
>>>                               free_xen_pagetable(l2e_get_mfn(ol2e));
>>>                       }
>>> +                    UNMAP_XEN_PAGETABLE(l2t);
>>>                       free_xen_pagetable(l2t_mfn);
>>>                   }
>>>               }
>>> @@ -5225,7 +5226,6 @@ int map_pages_to_xen(
>>>                   l3e_write_atomic(pl3e,
>>>                                    l3e_from_mfn(l2t_mfn, 
>>> __PAGE_HYPERVISOR));
>>>                   UNMAP_XEN_PAGETABLE(l2t);
>>> -                l2t = NULL;
>>>               }
>>>               if ( locking )
>>>                   spin_unlock(&map_pgdir_lock);
>>> @@ -5346,7 +5346,6 @@ int map_pages_to_xen(
>>>                       l2e_write_atomic(pl2e, l2e_from_mfn(l1t_mfn,
>>>                                                           
>>> __PAGE_HYPERVISOR));
>>>                       UNMAP_XEN_PAGETABLE(l1t);
>>> -                    l1t = NULL;
>>>                   }
>>>                   if ( locking )
>>>                       spin_unlock(&map_pgdir_lock);
>>> @@ -5589,7 +5588,6 @@ int modify_xen_mappings(unsigned long s, 
>>> unsigned long e, unsigned int nf)
>>>               {
>>>                   l3e_write_atomic(pl3e, l3e_from_mfn(mfn, 
>>> __PAGE_HYPERVISOR));
>>>                   UNMAP_XEN_PAGETABLE(l2t);
>>> -                l2t = NULL;
>>>               }
>>>               if ( locking )
>>>                   spin_unlock(&map_pgdir_lock);
>>> @@ -5657,7 +5655,6 @@ int modify_xen_mappings(unsigned long s, 
>>> unsigned long e, unsigned int nf)
>>>                       l2e_write_atomic(pl2e, l2e_from_mfn(mfn,
>>>                                                           
>>> __PAGE_HYPERVISOR));
>>>                       UNMAP_XEN_PAGETABLE(l1t);
>>> -                    l1t = NULL;
>>>                   }
>>>                   if ( locking )
>>>                       spin_unlock(&map_pgdir_lock);
>>> diff --git a/xen/common/vmap.c b/xen/common/vmap.c
>>> index faebc1ddf1..fcdb8495c8 100644
>>> --- a/xen/common/vmap.c
>>> +++ b/xen/common/vmap.c
>>> @@ -19,6 +19,19 @@ static unsigned int __read_mostly 
>>> vm_end[VMAP_REGION_NR];
>>>   /* lowest known clear bit in the bitmap */
>>>   static unsigned int vm_low[VMAP_REGION_NR];
>>> +mfn_t vmap_to_mfn(void *va)
>>> +{
>>> +    l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)(va));
>>> +    mfn_t ret = _mfn(l1e_get_pfn(*pl1e));
>>
>> We already
>>
>>> +    unmap_xen_pagetable(pl1e);
>>
>> My knowledge of the x86 port is quite limited, but this feels 
>> suspicious to see an unmap in a function called vmap_to_mfn(). Where 
>> is the map done?
>>
>> Furthermore, this is not going to compile on Arm. You probably want to 
>> implement this function in x86 code and keep the current 
>> implementation for Arm.
>>
> 
> Without the direct map, Xen page tables are accessed after mapping them 
> into the address space with map_domain_page(), and unmapped when done. 
> To read the l1e for the vmap, the page the l1e is in is first mapped, 
> then the mfn is read, then the page is unmapped.

I am afraid I still don't understand. Maybe it will become clearer once 
the branch is provided.

Can you provide the exact call path where the corresponding 
map_domain_page will be done? Is it done by virt_to_xen_l1e()?

> 
> This series is based on 60 patches from Wei, so he might also be able to 
> comment more on the details.

So is this a leak introduced by Wei's series?

Cheers,
Wei Liu Sept. 26, 2019, 1:16 p.m. UTC | #4
On Thu, Sep 26, 2019 at 10:46:21AM +0100, hongyax@amazon.com wrote:
> From: Hongyan Xia <hongyax@amazon.com>
> 
> Not unmapping pages after map_xen_pagetable can leak the virtual address
> space over time.

I understand this part, but ...

> Also this fix makes vmap_to_mfn non-trivial to be a
> macro. There might be better options but move it into vmap.c for now.
> 

... not this part.

> Signed-off-by: Hongyan Xia <hongyax@amazon.com>
> ---
>  xen/arch/x86/mm.c          |  5 +----
>  xen/common/vmap.c          | 13 +++++++++++++
>  xen/include/asm-arm/mm.h   |  2 --
>  xen/include/asm-x86/page.h |  2 --
>  xen/include/xen/vmap.h     |  3 +++
>  5 files changed, 17 insertions(+), 8 deletions(-)
> 
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index b2b2edbed1..145c5ab47c 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5160,6 +5160,7 @@ int map_pages_to_xen(
>                               !(l2e_get_flags(ol2e) & _PAGE_PSE) )
>                              free_xen_pagetable(l2e_get_mfn(ol2e));
>                      }
> +                    UNMAP_XEN_PAGETABLE(l2t);

This is presumably the issue you try to fix.

>                      free_xen_pagetable(l2t_mfn);
>                  }
>              }
> @@ -5225,7 +5226,6 @@ int map_pages_to_xen(
>                  l3e_write_atomic(pl3e,
>                                   l3e_from_mfn(l2t_mfn, __PAGE_HYPERVISOR));
>                  UNMAP_XEN_PAGETABLE(l2t);
> -                l2t = NULL;

This and similar changes below are irrelevant to the bug your try to
fix.

UNMAP_XEN_PAGETABLE already sets lXt to NULL. Deleting these two lines
are fine, but they should be folded into one of the previous patches
where UNMAP_XEN_PAGETABLE was used in this function.

>              }
>              if ( locking )
>                  spin_unlock(&map_pgdir_lock);
> @@ -5346,7 +5346,6 @@ int map_pages_to_xen(
>                      l2e_write_atomic(pl2e, l2e_from_mfn(l1t_mfn,
>                                                          __PAGE_HYPERVISOR));
>                      UNMAP_XEN_PAGETABLE(l1t);
> -                    l1t = NULL;
>                  }
>                  if ( locking )
>                      spin_unlock(&map_pgdir_lock);
> @@ -5589,7 +5588,6 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
>              {
>                  l3e_write_atomic(pl3e, l3e_from_mfn(mfn, __PAGE_HYPERVISOR));
>                  UNMAP_XEN_PAGETABLE(l2t);
> -                l2t = NULL;
>              }
>              if ( locking )
>                  spin_unlock(&map_pgdir_lock);
> @@ -5657,7 +5655,6 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
>                      l2e_write_atomic(pl2e, l2e_from_mfn(mfn,
>                                                          __PAGE_HYPERVISOR));
>                      UNMAP_XEN_PAGETABLE(l1t);
> -                    l1t = NULL;
>                  }
>                  if ( locking )
>                      spin_unlock(&map_pgdir_lock);
> diff --git a/xen/common/vmap.c b/xen/common/vmap.c
> index faebc1ddf1..fcdb8495c8 100644
> --- a/xen/common/vmap.c
> +++ b/xen/common/vmap.c

I fail to see why you need to change vmap to fix a leak somewhere else.

I guess I will need to wait for your branch to have a closer look.

Wei.
Xia, Hongyan Sept. 27, 2019, 1:12 p.m. UTC | #5
On 26/09/2019 14:16, Wei Liu wrote:
> On Thu, Sep 26, 2019 at 10:46:21AM +0100, hongyax@amazon.com wrote:
>>
>> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
>> index b2b2edbed1..145c5ab47c 100644
>> --- a/xen/arch/x86/mm.c
>> +++ b/xen/arch/x86/mm.c
>> @@ -5160,6 +5160,7 @@ int map_pages_to_xen(
>>                                !(l2e_get_flags(ol2e) & _PAGE_PSE) )
>>                               free_xen_pagetable(l2e_get_mfn(ol2e));
>>                       }
>> +                    UNMAP_XEN_PAGETABLE(l2t);
> 
> This is presumably the issue you try to fix.
> 

Yes. Actually this patch fixes two issues, this is the first one.

>> diff --git a/xen/common/vmap.c b/xen/common/vmap.c
>> index faebc1ddf1..fcdb8495c8 100644
>> --- a/xen/common/vmap.c
>> +++ b/xen/common/vmap.c
> 
> I fail to see why you need to change vmap to fix a leak somewhere else.
> 

The second leak is that after the patches, we cannot just call vmap_to_mfn() 
because it calls virt_to_xen_l1e() under the hood which maps a page. We have to 
unmap it, therefore I modified the vmap_to_mfn to also do the unmapping. This 
is a separate issue than the first one, so maybe I could split the patch into two.

> I guess I will need to wait for your branch to have a closer look.
> 
> Wei.
>
diff mbox series

Patch

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index b2b2edbed1..145c5ab47c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5160,6 +5160,7 @@  int map_pages_to_xen(
                              !(l2e_get_flags(ol2e) & _PAGE_PSE) )
                             free_xen_pagetable(l2e_get_mfn(ol2e));
                     }
+                    UNMAP_XEN_PAGETABLE(l2t);
                     free_xen_pagetable(l2t_mfn);
                 }
             }
@@ -5225,7 +5226,6 @@  int map_pages_to_xen(
                 l3e_write_atomic(pl3e,
                                  l3e_from_mfn(l2t_mfn, __PAGE_HYPERVISOR));
                 UNMAP_XEN_PAGETABLE(l2t);
-                l2t = NULL;
             }
             if ( locking )
                 spin_unlock(&map_pgdir_lock);
@@ -5346,7 +5346,6 @@  int map_pages_to_xen(
                     l2e_write_atomic(pl2e, l2e_from_mfn(l1t_mfn,
                                                         __PAGE_HYPERVISOR));
                     UNMAP_XEN_PAGETABLE(l1t);
-                    l1t = NULL;
                 }
                 if ( locking )
                     spin_unlock(&map_pgdir_lock);
@@ -5589,7 +5588,6 @@  int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
             {
                 l3e_write_atomic(pl3e, l3e_from_mfn(mfn, __PAGE_HYPERVISOR));
                 UNMAP_XEN_PAGETABLE(l2t);
-                l2t = NULL;
             }
             if ( locking )
                 spin_unlock(&map_pgdir_lock);
@@ -5657,7 +5655,6 @@  int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                     l2e_write_atomic(pl2e, l2e_from_mfn(mfn,
                                                         __PAGE_HYPERVISOR));
                     UNMAP_XEN_PAGETABLE(l1t);
-                    l1t = NULL;
                 }
                 if ( locking )
                     spin_unlock(&map_pgdir_lock);
diff --git a/xen/common/vmap.c b/xen/common/vmap.c
index faebc1ddf1..fcdb8495c8 100644
--- a/xen/common/vmap.c
+++ b/xen/common/vmap.c
@@ -19,6 +19,19 @@  static unsigned int __read_mostly vm_end[VMAP_REGION_NR];
 /* lowest known clear bit in the bitmap */
 static unsigned int vm_low[VMAP_REGION_NR];
 
+mfn_t vmap_to_mfn(void *va)
+{
+    l1_pgentry_t *pl1e = virt_to_xen_l1e((unsigned long)(va));
+    mfn_t ret = _mfn(l1e_get_pfn(*pl1e));
+    unmap_xen_pagetable(pl1e);
+    return ret;
+}
+
+struct page_info *vmap_to_page(void *va)
+{
+    return mfn_to_page(vmap_to_mfn(va));
+}
+
 void __init vm_init_type(enum vmap_region type, void *start, void *end)
 {
     unsigned int i, nr;
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 262d92f18d..1b53429255 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -231,8 +231,6 @@  static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
 #define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
 #define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
 #define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
-#define vmap_to_mfn(va)     maddr_to_mfn(virt_to_maddr((vaddr_t)va))
-#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
 /* Page-align address and convert to frame number format */
 #define paddr_to_pfn_aligned(paddr)    paddr_to_pfn(PAGE_ALIGN(paddr))
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 906ec701a3..191de86bff 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -266,8 +266,6 @@  void copy_page_sse2(void *, const void *);
 #define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
 #define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
 #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
-#define vmap_to_mfn(va)     _mfn(l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va))))
-#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
 #endif /* !defined(__ASSEMBLY__) */
 
diff --git a/xen/include/xen/vmap.h b/xen/include/xen/vmap.h
index 369560e620..3d69727a9d 100644
--- a/xen/include/xen/vmap.h
+++ b/xen/include/xen/vmap.h
@@ -23,6 +23,9 @@  void *vmalloc_xen(size_t size);
 void *vzalloc(size_t size);
 void vfree(void *va);
 
+mfn_t vmap_to_mfn(void *va);
+struct page_info *vmap_to_page(void *va);
+
 void __iomem *ioremap(paddr_t, size_t);
 
 static inline void iounmap(void __iomem *va)