@@ -5056,42 +5056,57 @@ l1_pgentry_t *virt_to_xen_l1e(unsigned long v)
return pl1e;
}
+/*
+ * Unlike virt_to_mfn() which just translates between the direct map and the
+ * mfn, this version actually walks the page table to find the mfn of any
+ * virtual address, as long as it is mapped. If not, INVALID_MFN is returned.
+ */
unsigned long virt_to_mfn_walk(void *va)
{
unsigned long ret;
- l3_pgentry_t *pl3e;
- l2_pgentry_t *pl2e;
- l1_pgentry_t *pl1e;
-
- /*
- * FIXME: This is rather unoptimised, because e.g. virt_to_xen_l2e
- * recomputes virt_to_xen_l3e again. Clearly one can keep the result and
- * carry on.
- */
+ unsigned long v = (unsigned long)va;
+ l3_pgentry_t *pl3e = NULL;
+ l2_pgentry_t *pl2e = NULL;
+ l1_pgentry_t *pl1e = NULL;
- pl3e = virt_to_xen_l3e((unsigned long)(va));
- BUG_ON(!(l3e_get_flags(*pl3e) & _PAGE_PRESENT));
+ pl3e = virt_to_xen_l3e(v);
+ if ( !pl3e || !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
+ {
+ ret = mfn_x(INVALID_MFN);
+ goto out;
+ }
if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
{
ret = l3e_get_pfn(*pl3e);
- ret |= (((unsigned long)va & ((1UL << L3_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
- unmap_xen_pagetable(pl3e);
- return ret;
+ ret |= ((v & ((1UL << L3_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
+ goto out;
}
- pl2e = virt_to_xen_l2e((unsigned long)(va));
- BUG_ON(!(l2e_get_flags(*pl2e) & _PAGE_PRESENT));
+ pl2e = (l2_pgentry_t *)map_xen_pagetable(l3e_get_mfn(*pl3e))
+ + l2_table_offset(v);
+ if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
+ {
+ ret = mfn_x(INVALID_MFN);
+ goto out;
+ }
if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
{
ret = l2e_get_pfn(*pl2e);
- ret |= (((unsigned long)va & ((1UL << L2_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
- unmap_xen_pagetable(pl2e);
- return ret;
+ ret |= ((v & ((1UL << L2_PAGETABLE_SHIFT)-1)) >> PAGE_SHIFT);
+ goto out;
}
- pl1e = virt_to_xen_l1e((unsigned long)(va));
- BUG_ON(!(l1e_get_flags(*pl1e) & _PAGE_PRESENT));
+ pl1e = (l1_pgentry_t *)map_xen_pagetable(l2e_get_mfn(*pl2e))
+ + l1_table_offset(v);
+ if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
+ {
+ ret = mfn_x(INVALID_MFN);
+ goto out;
+ }
ret = l1e_get_pfn(*pl1e);
+ out:
+ unmap_xen_pagetable(pl3e);
+ unmap_xen_pagetable(pl2e);
unmap_xen_pagetable(pl1e);
return ret;
}
@@ -645,7 +645,8 @@ void free_xen_pagetable(mfn_t mfn);
l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
unsigned long virt_to_mfn_walk(void *va);
struct page_info *virt_to_page_walk(void *va);
-#define virt_to_maddr_walk(va) mfn_to_maddr(_mfn(virt_to_mfn_walk(va)))
+#define virt_to_maddr_walk(va) (mfn_to_maddr(_mfn(virt_to_mfn_walk(va))) | \
+ ((unsigned long)(va) & (PAGE_SIZE - 1)))
DECLARE_PER_CPU(mfn_t, root_pgt_mfn);