Message ID | 1463648711-26595-2-git-send-email-yu.c.zhang@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
>>> On 19.05.16 at 11:05, <yu.c.zhang@linux.intel.com> wrote: > @@ -5507,6 +5507,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) > get_gfn_query_unlocked(d, a.pfn, &t); > if ( p2m_is_mmio(t) ) > a.mem_type = HVMMEM_mmio_dm; > + else if ( t == p2m_ioreq_server ) > + a.mem_type = HVMMEM_ioreq_server; > else if ( p2m_is_readonly(t) ) > a.mem_type = HVMMEM_ram_ro; > else if ( p2m_is_ram(t) ) I can see this being suitable to be done here, but ... > @@ -5537,7 +5539,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) > [HVMMEM_ram_rw] = p2m_ram_rw, > [HVMMEM_ram_ro] = p2m_ram_ro, > [HVMMEM_mmio_dm] = p2m_mmio_dm, > - [HVMMEM_unused] = p2m_invalid > + [HVMMEM_unused] = p2m_invalid, > + [HVMMEM_ioreq_server] = p2m_ioreq_server > }; > > if ( copy_from_guest(&a, arg, 1) ) ... how can this be correct without actual handling having got added? IOW doesn't at least this change belong into a later patch? Jan
On 14/06/16 11:04, Jan Beulich wrote: >>>> On 19.05.16 at 11:05, <yu.c.zhang@linux.intel.com> wrote: >> @@ -5507,6 +5507,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) >> get_gfn_query_unlocked(d, a.pfn, &t); >> if ( p2m_is_mmio(t) ) >> a.mem_type = HVMMEM_mmio_dm; >> + else if ( t == p2m_ioreq_server ) >> + a.mem_type = HVMMEM_ioreq_server; >> else if ( p2m_is_readonly(t) ) >> a.mem_type = HVMMEM_ram_ro; >> else if ( p2m_is_ram(t) ) > > I can see this being suitable to be done here, but ... > >> @@ -5537,7 +5539,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) >> [HVMMEM_ram_rw] = p2m_ram_rw, >> [HVMMEM_ram_ro] = p2m_ram_ro, >> [HVMMEM_mmio_dm] = p2m_mmio_dm, >> - [HVMMEM_unused] = p2m_invalid >> + [HVMMEM_unused] = p2m_invalid, >> + [HVMMEM_ioreq_server] = p2m_ioreq_server >> }; >> >> if ( copy_from_guest(&a, arg, 1) ) > > ... how can this be correct without actual handling having got added? > IOW doesn't at least this change belong into a later patch? +1 With that change: Acked-by: George Dunlap <george.dunlap@citrix.com>
On 6/14/2016 6:04 PM, Jan Beulich wrote: >>>> On 19.05.16 at 11:05, <yu.c.zhang@linux.intel.com> wrote: >> @@ -5507,6 +5507,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) >> get_gfn_query_unlocked(d, a.pfn, &t); >> if ( p2m_is_mmio(t) ) >> a.mem_type = HVMMEM_mmio_dm; >> + else if ( t == p2m_ioreq_server ) >> + a.mem_type = HVMMEM_ioreq_server; >> else if ( p2m_is_readonly(t) ) >> a.mem_type = HVMMEM_ram_ro; >> else if ( p2m_is_ram(t) ) > I can see this being suitable to be done here, but ... > >> @@ -5537,7 +5539,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) >> [HVMMEM_ram_rw] = p2m_ram_rw, >> [HVMMEM_ram_ro] = p2m_ram_ro, >> [HVMMEM_mmio_dm] = p2m_mmio_dm, >> - [HVMMEM_unused] = p2m_invalid >> + [HVMMEM_unused] = p2m_invalid, >> + [HVMMEM_ioreq_server] = p2m_ioreq_server >> }; >> >> if ( copy_from_guest(&a, arg, 1) ) > ... how can this be correct without actual handling having got added? > IOW doesn't at least this change belong into a later patch? Thanks for your comments. :) Well, although the new handling logic is in the 3rd patch, we still have the old handling code. Without the other patches, a developer can still use HVMMEM_ioreq_server to write-protect some guest ram pages, and try to handle the write operations on these pages with the old approach - tracking these gfns inside the ioreq server's rangeset. Thanks Yu
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 5040a5c..21bc45c 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -1857,7 +1857,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, */ if ( (p2mt == p2m_mmio_dm) || (npfec.write_access && - (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) ) + (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) ) { __put_gfn(p2m, gfn); if ( ap2m_active ) @@ -5507,6 +5507,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) get_gfn_query_unlocked(d, a.pfn, &t); if ( p2m_is_mmio(t) ) a.mem_type = HVMMEM_mmio_dm; + else if ( t == p2m_ioreq_server ) + a.mem_type = HVMMEM_ioreq_server; else if ( p2m_is_readonly(t) ) a.mem_type = HVMMEM_ram_ro; else if ( p2m_is_ram(t) ) @@ -5537,7 +5539,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) [HVMMEM_ram_rw] = p2m_ram_rw, [HVMMEM_ram_ro] = p2m_ram_ro, [HVMMEM_mmio_dm] = p2m_mmio_dm, - [HVMMEM_unused] = p2m_invalid + [HVMMEM_unused] = p2m_invalid, + [HVMMEM_ioreq_server] = p2m_ioreq_server }; if ( copy_from_guest(&a, arg, 1) ) @@ -5586,7 +5589,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) } if ( !p2m_is_ram(t) && (!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) && - (t != p2m_mmio_write_dm || a.hvmmem_type != HVMMEM_ram_rw) ) + (t != p2m_ioreq_server || a.hvmmem_type != HVMMEM_ram_rw) ) { put_gfn(d, pfn); goto setmemtype_fail; diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c index 1ed5b47..a45a573 100644 --- a/xen/arch/x86/mm/p2m-ept.c +++ b/xen/arch/x86/mm/p2m-ept.c @@ -171,7 +171,7 @@ static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry, entry->a = entry->d = !!cpu_has_vmx_ept_ad; break; case p2m_grant_map_ro: - case p2m_mmio_write_dm: + case p2m_ioreq_server: entry->r = 1; entry->w = entry->x = 0; entry->a = !!cpu_has_vmx_ept_ad; diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c index 
3d80612..eabd2e3 100644 --- a/xen/arch/x86/mm/p2m-pt.c +++ b/xen/arch/x86/mm/p2m-pt.c @@ -94,7 +94,7 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn, default: return flags | _PAGE_NX_BIT; case p2m_grant_map_ro: - case p2m_mmio_write_dm: + case p2m_ioreq_server: return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT; case p2m_ram_ro: case p2m_ram_logdirty: diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c index 428be37..b322293 100644 --- a/xen/arch/x86/mm/shadow/multi.c +++ b/xen/arch/x86/mm/shadow/multi.c @@ -3226,7 +3226,7 @@ static int sh_page_fault(struct vcpu *v, /* Need to hand off device-model MMIO to the device model */ if ( p2mt == p2m_mmio_dm - || (p2mt == p2m_mmio_write_dm && ft == ft_demand_write) ) + || (p2mt == p2m_ioreq_server && ft == ft_demand_write) ) { gpa = guest_walk_to_gpa(&gw); goto mmio; diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index 65675a2..f3e87d6 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -71,7 +71,7 @@ typedef enum { p2m_ram_shared = 12, /* Shared or sharable memory */ p2m_ram_broken = 13, /* Broken page, access cause domain crash */ p2m_map_foreign = 14, /* ram pages from foreign domain */ - p2m_mmio_write_dm = 15, /* Read-only; writes go to the device model */ + p2m_ioreq_server = 15, } p2m_type_t; /* Modifiers to the query */ @@ -112,7 +112,7 @@ typedef unsigned int p2m_query_t; | p2m_to_mask(p2m_ram_ro) \ | p2m_to_mask(p2m_grant_map_ro) \ | p2m_to_mask(p2m_ram_shared) \ - | p2m_to_mask(p2m_mmio_write_dm)) + | p2m_to_mask(p2m_ioreq_server)) /* Write-discard types, which should discard the write operations */ #define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro) \ diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h index ebb907a..b3e45cf 100644 --- a/xen/include/public/hvm/hvm_op.h +++ b/xen/include/public/hvm/hvm_op.h @@ -84,11 +84,12 @@ typedef enum { HVMMEM_ram_ro, /* Read-only; writes are discarded */ 
HVMMEM_mmio_dm, /* Reads and write go to the device model */ #if __XEN_INTERFACE_VERSION__ < 0x00040700 - HVMMEM_mmio_write_dm /* Read-only; writes go to the device model */ + HVMMEM_mmio_write_dm, /* Read-only; writes go to the device model */ #else - HVMMEM_unused /* Placeholder; setting memory to this type + HVMMEM_unused, /* Placeholder; setting memory to this type will fail for code after 4.7.0 */ #endif + HVMMEM_ioreq_server } hvmmem_type_t; /* Following tools-only interfaces may change in future. */