
[2/3] Rename p2m_mmio_write_dm to p2m_ioreq_server

Message ID 1458130916-30068-1-git-send-email-yu.c.zhang@linux.intel.com
State New, archived

Commit Message

Yu Zhang March 16, 2016, 12:21 p.m. UTC
Previously, the p2m type p2m_mmio_write_dm was introduced for write-
protected memory pages whose write operations are supposed to be
forwarded to and emulated by an ioreq server. Yet the limitations of
rangesets restrict the number of guest pages that can be
write-protected.

This patch renames the p2m type p2m_mmio_write_dm to p2m_ioreq_server,
meaning that this p2m type can be claimed by a single ioreq server,
instead of being tracked inside the rangeset of an ioreq server.
Follow-up patches will add the related hvmop handling code which maps
the p2m_ioreq_server type to an ioreq server.
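
With this in place, a device model can mark guest pages it wants
write-protected with the new HVMMEM_ioreq_server type. For example
(illustrative sketch only, assuming the usual libxc wrapper
xc_hvm_set_mem_type(), which issues HVMOP_set_mem_type; the actual
claiming of the type by an ioreq server is left to the follow-up
patches):

    #include <xenctrl.h>

    /* Illustrative: switch one guest page from p2m_ram_rw to
     * p2m_ioreq_server so that its writes are forwarded to the emulator. */
    static int mark_page_ioreq_server(xc_interface *xch, domid_t domid,
                                      uint64_t gfn)
    {
        return xc_hvm_set_mem_type(xch, domid, HVMMEM_ioreq_server, gfn, 1);
    }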

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
---
 xen/arch/x86/hvm/hvm.c          | 10 +++++-----
 xen/arch/x86/mm/p2m-ept.c       |  2 +-
 xen/arch/x86/mm/p2m-pt.c        |  2 +-
 xen/arch/x86/mm/shadow/multi.c  |  2 +-
 xen/include/asm-x86/p2m.h       |  7 ++++---
 xen/include/public/hvm/hvm_op.h |  2 +-
 6 files changed, 13 insertions(+), 12 deletions(-)

Comments

George Dunlap March 22, 2016, 2:15 p.m. UTC | #1
On 16/03/16 12:21, Yu Zhang wrote:
> Previously, the p2m type p2m_mmio_write_dm was introduced for write-
> protected memory pages whose write operations are supposed to be
> forwarded to and emulated by an ioreq server. Yet the limitations of
> rangesets restrict the number of guest pages that can be
> write-protected.
> 
> This patch renames the p2m type p2m_mmio_write_dm to p2m_ioreq_server,
> meaning that this p2m type can be claimed by a single ioreq server,
> instead of being tracked inside the rangeset of an ioreq server.
> Follow-up patches will add the related hvmop handling code which maps
> the p2m_ioreq_server type to an ioreq server.
> 
> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
> ---

[snip]

> @@ -174,7 +174,8 @@ typedef unsigned int p2m_query_t;
>  
>  #define p2m_is_any_ram(_t)  (p2m_to_mask(_t) &                   \
>                               (P2M_RAM_TYPES | P2M_GRANT_TYPES |  \
> -                              p2m_to_mask(p2m_map_foreign)))
> +                              p2m_to_mask(p2m_map_foreign) | \
> +                              p2m_to_mask(p2m_ioreq_server)))

So this is the only bit that doesn't seem to be a straight rename.
What's the rationale for adding this in this patch?

And in any case, we want to add this to P2M_RAM_TYPES, don't we, so that
p2m_is_ram() returns true?

For example, if p2m_ioreq_server is *not* marked as p2m_is_ram(), then
when it's ballooned out the m2p mapping won't be updated properly (see
xen/arch/x86/mm/p2m.c:set_typed_p2m_entry()); and it looks like there
could be issues with emulation when running in shadow mode (see
xen/arch/x86/mm/shadow/common.c:emulate_gva_to_mfn()).  Other examples of this
sort of thing abound.
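
Something like the sketch below is what I have in mind (illustrative
only, not a complete hunk; the "..." stands for the RAM-type entries
already present in the mask):

    #define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw)              \
                           | ...  /* existing RAM types */      \
                           | p2m_to_mask(p2m_ioreq_server))

That way p2m_is_ram() covers p2m_ioreq_server, and p2m_is_any_ram()
picks it up automatically via P2M_RAM_TYPES, without the extra
p2m_to_mask() term this patch adds there.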

Everything else looks fine to me.

 -George
Yu Zhang March 23, 2016, 6:30 a.m. UTC | #2
On 3/22/2016 10:15 PM, George Dunlap wrote:
> On 16/03/16 12:21, Yu Zhang wrote:
>> Previously, the p2m type p2m_mmio_write_dm was introduced for write-
>> protected memory pages whose write operations are supposed to be
>> forwarded to and emulated by an ioreq server. Yet the limitations of
>> rangesets restrict the number of guest pages that can be
>> write-protected.
>>
>> This patch renames the p2m type p2m_mmio_write_dm to p2m_ioreq_server,
>> meaning that this p2m type can be claimed by a single ioreq server,
>> instead of being tracked inside the rangeset of an ioreq server.
>> Follow-up patches will add the related hvmop handling code which maps
>> the p2m_ioreq_server type to an ioreq server.
>>
>> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
>> Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
>> ---
>
> [snip]
>
>> @@ -174,7 +174,8 @@ typedef unsigned int p2m_query_t;
>>
>>   #define p2m_is_any_ram(_t)  (p2m_to_mask(_t) &                   \
>>                                (P2M_RAM_TYPES | P2M_GRANT_TYPES |  \
>> -                              p2m_to_mask(p2m_map_foreign)))
>> +                              p2m_to_mask(p2m_map_foreign) | \
>> +                              p2m_to_mask(p2m_ioreq_server)))
>
> So this is the only bit that doesn't seem to be a straight rename.
> What's the rationale for adding this in this patch?
>
> And in any case, we want to add this to P2M_RAM_TYPES, don't we, so that
> p2m_is_ram() returns true?
>
> For example, if p2m_ioreq_server is *not* marked as p2m_is_ram(), then
> when it's ballooned out the m2p mapping won't be updated properly (see
> xen/arch/x86/mm/p2m.c:set_typed_p2m_entry()); and it looks like there
> could be issues with emulation when running in shadow mode (see
> xen/arch/x86/mm/shadow/common.c:emulate_gva_to_mfn()).  Other examples of this
> sort of thing abound.
>

Thank you, George. I now believe the right place to add p2m_ioreq_server
is P2M_RAM_TYPES. :)

> Everything else looks fine to me.
>
>   -George
>

B.R.
Yu

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 3ccd33f..07eee4a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3176,7 +3176,7 @@  int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      */
     if ( (p2mt == p2m_mmio_dm) || 
          (npfec.write_access &&
-          (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) )
+          (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) )
     {
         __put_gfn(p2m, gfn);
         if ( ap2m_active )
@@ -6587,8 +6587,8 @@  static int hvmop_get_mem_type(
         get_gfn_query_unlocked(d, a.pfn, &t);
         if ( p2m_is_mmio(t) )
             a.mem_type =  HVMMEM_mmio_dm;
-        else if ( t == p2m_mmio_write_dm )
-            a.mem_type = HVMMEM_mmio_write_dm;
+        else if ( t == p2m_ioreq_server )
+            a.mem_type = HVMMEM_ioreq_server;
         else if ( p2m_is_readonly(t) )
             a.mem_type =  HVMMEM_ram_ro;
         else if ( p2m_is_ram(t) )
@@ -6620,7 +6620,7 @@  static bool_t hvm_allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
 {
     if ( p2m_is_ram(old) ||
          (p2m_is_hole(old) && new == p2m_mmio_dm) ||
-         (old == p2m_mmio_write_dm && new == p2m_ram_rw) )
+         (old == p2m_ioreq_server && new == p2m_ram_rw) )
         return 1;
 
     return 0;
@@ -6640,7 +6640,7 @@  static int hvmop_set_mem_type(
         [HVMMEM_ram_rw]  = p2m_ram_rw,
         [HVMMEM_ram_ro]  = p2m_ram_ro,
         [HVMMEM_mmio_dm] = p2m_mmio_dm,
-        [HVMMEM_mmio_write_dm] = p2m_mmio_write_dm
+        [HVMMEM_ioreq_server] = p2m_ioreq_server
     };
 
     if ( copy_from_guest(&a, arg, 1) )
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 3cb6868..380ec25 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -171,7 +171,7 @@  static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
             entry->a = entry->d = !!cpu_has_vmx_ept_ad;
             break;
         case p2m_grant_map_ro:
-        case p2m_mmio_write_dm:
+        case p2m_ioreq_server:
             entry->r = 1;
             entry->w = entry->x = 0;
             entry->a = !!cpu_has_vmx_ept_ad;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3d80612..eabd2e3 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -94,7 +94,7 @@  static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn,
     default:
         return flags | _PAGE_NX_BIT;
     case p2m_grant_map_ro:
-    case p2m_mmio_write_dm:
+    case p2m_ioreq_server:
         return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
     case p2m_ram_ro:
     case p2m_ram_logdirty:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index e5c8499..c81302a 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3225,7 +3225,7 @@  static int sh_page_fault(struct vcpu *v,
 
     /* Need to hand off device-model MMIO to the device model */
     if ( p2mt == p2m_mmio_dm
-         || (p2mt == p2m_mmio_write_dm && ft == ft_demand_write) )
+         || (p2mt == p2m_ioreq_server && ft == ft_demand_write) )
     {
         gpa = guest_walk_to_gpa(&gw);
         goto mmio;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 5392eb0..084a1f2 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -71,7 +71,7 @@  typedef enum {
     p2m_ram_shared = 12,          /* Shared or sharable memory */
     p2m_ram_broken = 13,          /* Broken page, access cause domain crash */
     p2m_map_foreign  = 14,        /* ram pages from foreign domain */
-    p2m_mmio_write_dm = 15,       /* Read-only; writes go to the device model */
+    p2m_ioreq_server = 15,
 } p2m_type_t;
 
 /* Modifiers to the query */
@@ -112,7 +112,7 @@  typedef unsigned int p2m_query_t;
                       | p2m_to_mask(p2m_ram_ro)         \
                       | p2m_to_mask(p2m_grant_map_ro)   \
                       | p2m_to_mask(p2m_ram_shared)     \
-                      | p2m_to_mask(p2m_mmio_write_dm))
+                      | p2m_to_mask(p2m_ioreq_server))
 
 /* Write-discard types, which should discard the write operations */
 #define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro)     \
@@ -174,7 +174,8 @@  typedef unsigned int p2m_query_t;
 
 #define p2m_is_any_ram(_t)  (p2m_to_mask(_t) &                   \
                              (P2M_RAM_TYPES | P2M_GRANT_TYPES |  \
-                              p2m_to_mask(p2m_map_foreign)))
+                              p2m_to_mask(p2m_map_foreign) | \
+                              p2m_to_mask(p2m_ioreq_server)))
 
 #define p2m_allows_invalid_mfn(t) (p2m_to_mask(t) & P2M_INVALID_MFN_TYPES)
 
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 1606185..a1eae52 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -83,7 +83,7 @@  typedef enum {
     HVMMEM_ram_rw,             /* Normal read/write guest RAM */
     HVMMEM_ram_ro,             /* Read-only; writes are discarded */
     HVMMEM_mmio_dm,            /* Reads and write go to the device model */
-    HVMMEM_mmio_write_dm       /* Read-only; writes go to the device model */
+    HVMMEM_ioreq_server,
 } hvmmem_type_t;
 
 /* Following tools-only interfaces may change in future. */