
[v2,22/55] x86_64/mm: switch to new APIs in paging_init

Message ID 4429f3d4cb3075d3dc2f30b3f8273e3620b8d995.1569833766.git.hongyax@amazon.com
State New, archived
Series Switch to domheap for Xen PTEs

Commit Message

Xia, Hongyan Sept. 30, 2019, 10:33 a.m. UTC
From: Wei Liu <wei.liu2@citrix.com>

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: Hongyan Xia <hongyax@amazon.com>

---
Changed since v1:
  * use a global mapping for compat_idle_pg_table_l2, otherwise
    l2_ro_mpt will unmap it.
---
 xen/arch/x86/x86_64/mm.c | 50 +++++++++++++++++++++++++++++-----------
 1 file changed, 37 insertions(+), 13 deletions(-)
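
The v2 change is about mapping lifetime: compat_idle_pg_table_l2 is
dereferenced long after paging_init() returns, so it must not alias the
transient mapping that UNMAP_XEN_PAGETABLE_NEW() tears down at the end
of the function. A minimal sketch of the split, using the helper names
from the patch below (the surrounding code is illustrative only, not
part of the patch):

    mfn_t mfn = alloc_xen_pagetable_new();
    l2_pgentry_t *l2t;

    if ( mfn_eq(mfn, INVALID_MFN) )
        goto nomem;

    /* Transient alias: fine for the local initialisation work... */
    l2t = map_xen_pagetable_new(mfn);
    clear_page(l2t);

    /* ...but the long-lived pointer needs a global mapping, which
     * survives after the transient alias is unmapped below. */
    compat_idle_pg_table_l2 = map_domain_page_global(mfn);

    unmap_xen_pagetable_new(l2t);
    /* compat_idle_pg_table_l2 remains valid here. */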

Comments

Wei Liu Oct. 1, 2019, 11:51 a.m. UTC | #1
On Mon, Sep 30, 2019 at 11:33:14AM +0100, Hongyan Xia wrote:
> From: Wei Liu <wei.liu2@citrix.com>
> 
> Signed-off-by: Wei Liu <wei.liu2@citrix.com>
> Signed-off-by: Hongyan Xia <hongyax@amazon.com>
> 
> ---
> Changed since v1:
>   * use a global mapping for compat_idle_pg_table_l2, otherwise
>     l2_ro_mpt will unmap it.

Hmmm... I wonder why XTF didn't catch this.

If we really want to go all the way to eliminate persistent mappings
for page tables, the code should be changed such that:

1. compat_idle_pg_table_l2 should be changed to store mfn, not va.
2. map and unmap that mfn when access to the compat page table is
   required.

Wei.
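
In code terms, the proposal amounts to something like the following
sketch. The variable and helper names here are hypothetical; only
map_xen_pagetable_new()/unmap_xen_pagetable_new() come from the series:

    /* 1. Store the MFN, not a VA. */
    static mfn_t compat_idle_pg_table_l2_mfn;

    /* 2. Map on demand; every user pairs this with an unmap. */
    static l2_pgentry_t *map_compat_idle_l2(void)
    {
        return map_xen_pagetable_new(compat_idle_pg_table_l2_mfn);
    }

    /* Caller pattern: map, use, unmap. */
    l2_pgentry_t *l2t = map_compat_idle_l2();
    /* ... read or modify the compat L2 entries ... */
    unmap_xen_pagetable_new(l2t);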
Xia, Hongyan Oct. 1, 2019, 1:39 p.m. UTC | #2
On 01/10/2019 12:51, Wei Liu wrote:
> On Mon, Sep 30, 2019 at 11:33:14AM +0100, Hongyan Xia wrote:
>> From: Wei Liu <wei.liu2@citrix.com>
>>
>> Signed-off-by: Wei Liu <wei.liu2@citrix.com>
>> Signed-off-by: Hongyan Xia <hongyax@amazon.com>
>>
>> ---
>> Changed since v1:
>>    * use a global mapping for compat_idle_pg_table_l2, otherwise
>>      l2_ro_mpt will unmap it.
> 
> Hmmm... I wonder why XTF didn't catch this.
> 

Well, probably because this only shows up once we actually remove all the 
fast paths and the direct map. If we just apply this batch, unmapping from 
the direct map is a no-op. I caught this with my later patches.

> If we really want to go all the way to eliminate persistent mappings
> for page tables, the code should be changed such that:
> 
> 1. compat_idle_pg_table_l2 should be changed to store mfn, not va.
> 2. map and unmap that mfn when access to the compat page table is
>     required.
> 

Sounds sensible and more consistent with other PTEs.

Hongyan
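
For reference, the conversion this patch applies throughout
paging_init() follows a single pattern, sketched here with the helper
names taken from the diff below (the wrapper function itself is
illustrative; the real code open-codes this inline):

    static int __init populate_idle_l3(unsigned long va)
    {
        l3_pgentry_t *pl3t;
        mfn_t mfn;

        /* Old API (removed): alloc_xen_pagetable() returned a
         * direct-map VA and the L4 entry was written via __pa(pl3t). */

        mfn = alloc_xen_pagetable_new();   /* allocation yields an MFN */
        if ( mfn_eq(mfn, INVALID_MFN) )
            return -ENOMEM;

        pl3t = map_xen_pagetable_new(mfn); /* explicit, temporary VA */
        clear_page(pl3t);
        l4e_write(&idle_pg_table[l4_table_offset(va)],
                  l4e_from_mfn(mfn, __PAGE_HYPERVISOR_RW));
        UNMAP_XEN_PAGETABLE_NEW(pl3t);     /* unmap and NULL the VA */

        return 0;
    }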

Patch

diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index ac5e366e5b..c8c71564ba 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -496,9 +496,10 @@  void __init paging_init(void)
 {
     unsigned long i, mpt_size, va;
     unsigned int n, memflags;
-    l3_pgentry_t *l3_ro_mpt;
-    l2_pgentry_t *pl2e = NULL, *l2_ro_mpt;
+    l3_pgentry_t *l3_ro_mpt = NULL;
+    l2_pgentry_t *pl2e = NULL, *l2_ro_mpt = NULL;
     struct page_info *l1_pg;
+    mfn_t l3_ro_mpt_mfn, l2_ro_mpt_mfn;
 
     /*
      * We setup the L3s for 1:1 mapping if host support memory hotplug
@@ -511,22 +512,29 @@  void __init paging_init(void)
         if ( !(l4e_get_flags(idle_pg_table[l4_table_offset(va)]) &
               _PAGE_PRESENT) )
         {
-            l3_pgentry_t *pl3t = alloc_xen_pagetable();
+            l3_pgentry_t *pl3t;
+            mfn_t mfn;
 
-            if ( !pl3t )
+            mfn = alloc_xen_pagetable_new();
+            if ( mfn_eq(mfn, INVALID_MFN) )
                 goto nomem;
+
+            pl3t = map_xen_pagetable_new(mfn);
             clear_page(pl3t);
             l4e_write(&idle_pg_table[l4_table_offset(va)],
-                      l4e_from_paddr(__pa(pl3t), __PAGE_HYPERVISOR_RW));
+                      l4e_from_mfn(mfn, __PAGE_HYPERVISOR_RW));
+            UNMAP_XEN_PAGETABLE_NEW(pl3t);
         }
     }
 
     /* Create user-accessible L2 directory to map the MPT for guests. */
-    if ( (l3_ro_mpt = alloc_xen_pagetable()) == NULL )
+    l3_ro_mpt_mfn = alloc_xen_pagetable_new();
+    if ( mfn_eq(l3_ro_mpt_mfn, INVALID_MFN) )
         goto nomem;
+    l3_ro_mpt = map_xen_pagetable_new(l3_ro_mpt_mfn);
     clear_page(l3_ro_mpt);
     l4e_write(&idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)],
-              l4e_from_paddr(__pa(l3_ro_mpt), __PAGE_HYPERVISOR_RO | _PAGE_USER));
+              l4e_from_mfn(l3_ro_mpt_mfn, __PAGE_HYPERVISOR_RO | _PAGE_USER));
 
     /*
      * Allocate and map the machine-to-phys table.
@@ -609,12 +617,21 @@  void __init paging_init(void)
         }
         if ( !((unsigned long)pl2e & ~PAGE_MASK) )
         {
-            if ( (l2_ro_mpt = alloc_xen_pagetable()) == NULL )
+            /*
+             * Unmap l2_ro_mpt, which could've been mapped in previous
+             * iteration.
+             */
+            unmap_xen_pagetable_new(l2_ro_mpt);
+
+            l2_ro_mpt_mfn = alloc_xen_pagetable_new();
+            if ( mfn_eq(l2_ro_mpt_mfn, INVALID_MFN) )
                 goto nomem;
+
+            l2_ro_mpt = map_xen_pagetable_new(l2_ro_mpt_mfn);
             clear_page(l2_ro_mpt);
             l3e_write(&l3_ro_mpt[l3_table_offset(va)],
-                      l3e_from_paddr(__pa(l2_ro_mpt),
-                                     __PAGE_HYPERVISOR_RO | _PAGE_USER));
+                      l3e_from_mfn(l2_ro_mpt_mfn,
+                                   __PAGE_HYPERVISOR_RO | _PAGE_USER));
             pl2e = l2_ro_mpt;
             ASSERT(!l2_table_offset(va));
         }
@@ -626,18 +643,23 @@  void __init paging_init(void)
     }
 #undef CNT
 #undef MFN
+    UNMAP_XEN_PAGETABLE_NEW(l2_ro_mpt);
+    UNMAP_XEN_PAGETABLE_NEW(l3_ro_mpt);
 
     /* Create user-accessible L2 directory to map the MPT for compat guests. */
     BUILD_BUG_ON(l4_table_offset(RDWR_MPT_VIRT_START) !=
                  l4_table_offset(HIRO_COMPAT_MPT_VIRT_START));
     l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(
         HIRO_COMPAT_MPT_VIRT_START)]);
-    if ( (l2_ro_mpt = alloc_xen_pagetable()) == NULL )
+
+    l2_ro_mpt_mfn = alloc_xen_pagetable_new();
+    if ( mfn_eq(l2_ro_mpt_mfn, INVALID_MFN) )
         goto nomem;
-    compat_idle_pg_table_l2 = l2_ro_mpt;
+    l2_ro_mpt = map_xen_pagetable_new(l2_ro_mpt_mfn);
+    compat_idle_pg_table_l2 = map_domain_page_global(l2_ro_mpt_mfn);
     clear_page(l2_ro_mpt);
     l3e_write(&l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
-              l3e_from_paddr(__pa(l2_ro_mpt), __PAGE_HYPERVISOR_RO));
+              l3e_from_mfn(l2_ro_mpt_mfn, __PAGE_HYPERVISOR_RO));
     pl2e = l2_ro_mpt;
     pl2e += l2_table_offset(HIRO_COMPAT_MPT_VIRT_START);
     /* Allocate and map the compatibility mode machine-to-phys table. */
@@ -679,6 +701,8 @@  void __init paging_init(void)
 #undef CNT
 #undef MFN
 
+    UNMAP_XEN_PAGETABLE_NEW(l2_ro_mpt);
+
     machine_to_phys_mapping_valid = 1;
 
     /* Set up linear page table mapping. */