--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1865,7 +1865,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
*/
if ( (p2mt == p2m_mmio_dm) ||
(npfec.write_access &&
- (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) )
+ (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) )
{
__put_gfn(p2m, gfn);
if ( ap2m_active )
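This is the HAP nested-page-fault routing: any access to a p2m_mmio_dm page, and write accesses to discard-write or (now) p2m_ioreq_server pages, are handed to the MMIO emulation path rather than being fixed up as ordinary RAM faults. A minimal stand-alone model of that decision, with toy types in place of Xen's (P2M_RAM_RO standing in for the discard-write class):

    #include <stdbool.h>
    #include <stdio.h>

    enum p2m { P2M_RAM_RW, P2M_RAM_RO, P2M_MMIO_DM, P2M_IOREQ_SERVER };

    /* Mirrors the if() above: true when the fault should be handled by
     * the MMIO emulation code instead of being treated as RAM. */
    static bool route_to_emulator(enum p2m t, bool write_access)
    {
        return t == P2M_MMIO_DM ||
               (write_access &&
                (t == P2M_RAM_RO /* discard-write */ || t == P2M_IOREQ_SERVER));
    }

    int main(void)
    {
        /* Reads of an ioreq_server page are served from RAM (0);
         * writes go to the emulator (1). */
        printf("%d %d\n", route_to_emulator(P2M_IOREQ_SERVER, false),
                          route_to_emulator(P2M_IOREQ_SERVER, true));
        return 0;
    }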
@@ -5565,6 +5565,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
get_gfn_query_unlocked(d, a.pfn, &t);
if ( p2m_is_mmio(t) )
a.mem_type = HVMMEM_mmio_dm;
+ else if ( t == p2m_ioreq_server )
+ a.mem_type = HVMMEM_ioreq_server;
else if ( p2m_is_readonly(t) )
a.mem_type = HVMMEM_ram_ro;
else if ( p2m_is_ram(t) )
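Ordering matters in this HVMOP_get_mem_type chain: p2m_ioreq_server is also a member of P2M_RO_TYPES (see the p2m.h hunk below), so the new check must sit before the p2m_is_readonly() test, or such pages would be misreported as HVMMEM_ram_ro. A tiny self-contained illustration, with toy constants in place of Xen's:

    #include <assert.h>

    enum hvmmem { HVMMEM_RAM_RW, HVMMEM_RAM_RO, HVMMEM_IOREQ_SERVER };

    #define MASK(t)          (1UL << (t))
    #define P2M_RAM_RO        6
    #define P2M_IOREQ_SERVER 15
    #define RO_TYPES         (MASK(P2M_RAM_RO) | MASK(P2M_IOREQ_SERVER))

    static enum hvmmem mem_type(int t)
    {
        if ( t == P2M_IOREQ_SERVER )   /* must precede the read-only test */
            return HVMMEM_IOREQ_SERVER;
        if ( MASK(t) & RO_TYPES )      /* p2m_is_readonly() analogue */
            return HVMMEM_RAM_RO;
        return HVMMEM_RAM_RW;
    }

    int main(void)
    {
        /* Swap the two tests above and this assertion fails. */
        assert(mem_type(P2M_IOREQ_SERVER) == HVMMEM_IOREQ_SERVER);
        return 0;
    }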
@@ -5595,7 +5597,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
[HVMMEM_ram_rw] = p2m_ram_rw,
[HVMMEM_ram_ro] = p2m_ram_ro,
[HVMMEM_mmio_dm] = p2m_mmio_dm,
- [HVMMEM_unused] = p2m_invalid
+ [HVMMEM_unused] = p2m_invalid,
+ [HVMMEM_ioreq_server] = p2m_ioreq_server
};
if ( copy_from_guest(&a, arg, 1) )
@@ -5644,7 +5647,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
}
if ( !p2m_is_ram(t) &&
(!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) &&
- (t != p2m_mmio_write_dm || a.hvmmem_type != HVMMEM_ram_rw) )
+ (t != p2m_ioreq_server || a.hvmmem_type != HVMMEM_ram_rw) )
{
put_gfn(d, pfn);
goto setmemtype_fail;
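With HVMMEM_ioreq_server wired into the memtype[] table, a sufficiently privileged toolstack or device model can move a RAM page into the new type; per the check above, the only transition out of it is back to HVMMEM_ram_rw. A hedged sketch of that round trip using the libxc wrapper xc_hvm_set_mem_type(); the domid and gfn values are placeholders, and error handling is minimal:

    #include <stdio.h>
    #include <xenctrl.h>
    #include <xen/hvm/hvm_op.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        domid_t domid = 1;        /* placeholder guest */
        uint64_t gfn = 0x1234;    /* placeholder guest frame number */

        if ( !xch )
            return 1;

        /* ram_rw -> ioreq_server: guest writes to this gfn now fault
         * out to the emulation path instead of hitting RAM. */
        if ( xc_hvm_set_mem_type(xch, domid, HVMMEM_ioreq_server, gfn, 1) )
            fprintf(stderr, "claim failed\n");

        /* ioreq_server -> ram_rw is the only transition allowed above. */
        if ( xc_hvm_set_mem_type(xch, domid, HVMMEM_ram_rw, gfn, 1) )
            fprintf(stderr, "release failed\n");

        xc_interface_close(xch);
        return 0;
    }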
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -171,7 +171,7 @@ static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
entry->a = entry->d = !!cpu_has_vmx_ept_ad;
break;
case p2m_grant_map_ro:
- case p2m_mmio_write_dm:
+ case p2m_ioreq_server:
entry->r = 1;
entry->w = entry->x = 0;
entry->a = !!cpu_has_vmx_ept_ad;
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -94,7 +94,7 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn,
default:
return flags | _PAGE_NX_BIT;
case p2m_grant_map_ro:
- case p2m_mmio_write_dm:
+ case p2m_ioreq_server:
return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
case p2m_ram_ro:
case p2m_ram_logdirty:
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3226,7 +3226,7 @@ static int sh_page_fault(struct vcpu *v,
/* Need to hand off device-model MMIO to the device model */
if ( p2mt == p2m_mmio_dm
- || (p2mt == p2m_mmio_write_dm && ft == ft_demand_write) )
+ || (p2mt == p2m_ioreq_server && ft == ft_demand_write) )
{
gpa = guest_walk_to_gpa(&gw);
goto mmio;
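Taken together, all three paging back-ends now give p2m_ioreq_server identical treatment: EPT maps it r=1, w=x=0; the non-EPT p2m (p2m-pt) maps it present but without _PAGE_RW and with NX set; and the shadow fault handler forwards only demand writes (ft == ft_demand_write) to the device model. Reads are always satisfied from RAM. A toy summary of the resulting permissions, with stand-in types:

    #include <assert.h>
    #include <stdbool.h>

    struct perms { bool r, w, x; };

    /* What both page-table formats grant a p2m_ioreq_server mapping:
     * EPT:    entry->r = 1, entry->w = entry->x = 0;
     * p2m-pt: present, no _PAGE_RW, _PAGE_NX_BIT set. */
    static struct perms ioreq_server_perms(void)
    {
        return (struct perms){ .r = true, .w = false, .x = false };
    }

    int main(void)
    {
        struct perms p = ioreq_server_perms();
        assert(p.r && !p.w && !p.x);   /* reads hit RAM; writes fault */
        return 0;
    }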
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -71,7 +71,7 @@ typedef enum {
p2m_ram_shared = 12, /* Shared or sharable memory */
p2m_ram_broken = 13, /* Broken page, access causes domain crash */
p2m_map_foreign = 14, /* ram pages from foreign domain */
- p2m_mmio_write_dm = 15, /* Read-only; writes go to the device model */
+ p2m_ioreq_server = 15, /* Read-only; writes go to the device model */
} p2m_type_t;
/* Modifiers to the query */
@@ -112,7 +112,7 @@ typedef unsigned int p2m_query_t;
| p2m_to_mask(p2m_ram_ro) \
| p2m_to_mask(p2m_grant_map_ro) \
| p2m_to_mask(p2m_ram_shared) \
- | p2m_to_mask(p2m_mmio_write_dm))
+ | p2m_to_mask(p2m_ioreq_server))
/* Write-discard types, which should discard the write operations */
#define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro) \
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -84,11 +84,12 @@ typedef enum {
HVMMEM_ram_ro, /* Read-only; writes are discarded */
HVMMEM_mmio_dm, /* Reads and writes go to the device model */
#if __XEN_INTERFACE_VERSION__ < 0x00040700
- HVMMEM_mmio_write_dm /* Read-only; writes go to the device model */
+ HVMMEM_mmio_write_dm, /* Read-only; writes go to the device model */
#else
- HVMMEM_unused /* Placeholder; setting memory to this type
+ HVMMEM_unused, /* Placeholder; setting memory to this type
will fail for code after 4.7.0 */
#endif
+ HVMMEM_ioreq_server /* Read-only; writes go to the device model */
} hvmmem_type_t;
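Appending HVMMEM_ioreq_server after the #endif (and making the conditional entries non-terminal via the added commas) gives it the same enumerator value, 4, under both settings of __XEN_INTERFACE_VERSION__, so the ABI stays stable across the version split. A hedged compile-time check of that expectation:

    #include <xen/hvm/hvm_op.h>

    /* Fails to compile if HVMMEM_ioreq_server ever shifts away from 4. */
    typedef char hvmmem_ioreq_server_is_4[HVMMEM_ioreq_server == 4 ? 1 : -1];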
/* Following tools-only interfaces may change in future. */