@@ -1714,6 +1714,37 @@ int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch,
int is_mmio,
uint64_t start,
uint64_t end);
+/**
+ * This function registers a range of write-protected guest memory, so
+ * that writes to it are forwarded to the given IOREQ Server for
+ * emulation.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id to be serviced.
+ * @parm id the IOREQ Server id.
+ * @parm start start of range (a guest frame number).
+ * @parm end end of range (inclusive).
+ * @return 0 on success, -1 on failure.
+ */
+int xc_hvm_map_wp_mem_range_to_ioreq_server(xc_interface *xch,
+ domid_t domid,
+ ioservid_t id,
+ xen_pfn_t start,
+ xen_pfn_t end);
+
+/**
+ * This function deregisters a range of write-protected guest memory,
+ * so that writes to it are no longer forwarded to the given IOREQ
+ * Server.
+ *
+ * @parm xch a handle to an open hypervisor interface.
+ * @parm domid the domain id to be serviced.
+ * @parm id the IOREQ Server id.
+ * @parm start start of range (a guest frame number).
+ * @parm end end of range (inclusive).
+ * @return 0 on success, -1 on failure.
+ */
+int xc_hvm_unmap_wp_mem_range_from_ioreq_server(xc_interface *xch,
+ domid_t domid,
+ ioservid_t id,
+ xen_pfn_t start,
+ xen_pfn_t end);
/**
* This function registers a PCI device for config space emulation.
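For illustration only (not part of the patch): a minimal caller-side sketch of the new pair of calls. It assumes the device model already holds a server id from xc_hvm_create_ioreq_server(); the helper names are hypothetical.

    /* Trap writes to nr_frames pages starting at frame start_pfn. */
    static int wp_track_range(xc_interface *xch, domid_t domid,
                              ioservid_t id, xen_pfn_t start_pfn,
                              unsigned long nr_frames)
    {
        /* The end of the range is inclusive, hence the -1. */
        return xc_hvm_map_wp_mem_range_to_ioreq_server(
            xch, domid, id, start_pfn, start_pfn + nr_frames - 1);
    }

    /* Stop trapping writes to the same range on teardown. */
    static int wp_untrack_range(xc_interface *xch, domid_t domid,
                                ioservid_t id, xen_pfn_t start_pfn,
                                unsigned long nr_frames)
    {
        return xc_hvm_unmap_wp_mem_range_from_ioreq_server(
            xch, domid, id, start_pfn, start_pfn + nr_frames - 1);
    }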
@@ -1523,6 +1523,61 @@ int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
return rc;
}
+int xc_hvm_map_wp_mem_range_to_ioreq_server(xc_interface *xch,
+ domid_t domid,
+ ioservid_t id,
+ xen_pfn_t start,
+ xen_pfn_t end)
+{
+ DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
+ int rc;
+
+ arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+ if ( arg == NULL )
+ return -1;
+
+ arg->domid = domid;
+ arg->id = id;
+ arg->type = HVMOP_IO_RANGE_WP_MEM;
+ arg->start = start;
+ arg->end = end;
+
+ rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
+ HVMOP_map_io_range_to_ioreq_server,
+ HYPERCALL_BUFFER_AS_ARG(arg));
+
+ xc_hypercall_buffer_free(xch, arg);
+ return rc;
+}
+
+int xc_hvm_unmap_wp_mem_range_from_ioreq_server(xc_interface *xch,
+ domid_t domid,
+ ioservid_t id,
+ xen_pfn_t start,
+ xen_pfn_t end)
+{
+ DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
+ int rc;
+
+ arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+ if ( arg == NULL )
+ return -1;
+
+ arg->domid = domid;
+ arg->id = id;
+ arg->type = HVMOP_IO_RANGE_WP_MEM;
+ arg->start = start;
+ arg->end = end;
+
+ rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
+ HVMOP_unmap_io_range_from_ioreq_server,
+ HYPERCALL_BUFFER_AS_ARG(arg));
+
+ xc_hypercall_buffer_free(xch, arg);
+ return rc;
+}
+
int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
ioservid_t id, uint16_t segment,
uint8_t bus, uint8_t device,
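A note on the two wrappers above: no new hypercall is introduced. Write-protected ranges reuse the existing HVMOP_map_io_range_to_ioreq_server and HVMOP_unmap_io_range_from_ioreq_server operations and the xen_hvm_io_range_t argument, distinguished only by the new HVMOP_IO_RANGE_WP_MEM type. Unlike the port and MMIO variants, start and end here are guest frame numbers (xen_pfn_t) rather than byte addresses, matching the PFN_DOWN() lookup performed on the hypervisor side.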
@@ -932,6 +932,9 @@ static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
rangeset_destroy(s->range[i]);
}
+static const char *const io_range_name[NR_IO_RANGE_TYPES] =
+                          {"port", "memory", "pci", "wp-mem"};
+
static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
bool_t is_default)
{
@@ -946,10 +949,7 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
char *name;
rc = asprintf(&name, "ioreq_server %d %s", s->id,
- (i == HVMOP_IO_RANGE_PORT) ? "port" :
- (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
- (i == HVMOP_IO_RANGE_PCI) ? "pci" :
- "");
+ (i < NR_IO_RANGE_TYPES) ? io_range_name[i] : "");
if ( rc )
goto fail;
@@ -1267,6 +1267,7 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
case HVMOP_IO_RANGE_PORT:
case HVMOP_IO_RANGE_MEMORY:
case HVMOP_IO_RANGE_PCI:
+ case HVMOP_IO_RANGE_WP_MEM:
r = s->range[type];
break;
@@ -1318,6 +1319,7 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
case HVMOP_IO_RANGE_PORT:
case HVMOP_IO_RANGE_MEMORY:
case HVMOP_IO_RANGE_PCI:
+ case HVMOP_IO_RANGE_WP_MEM:
r = s->range[type];
break;
@@ -2558,6 +2560,8 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
uint32_t cf8;
uint8_t type;
uint64_t addr;
+ p2m_type_t p2mt;
+ struct page_info *ram_page;
if ( list_empty(&d->arch.hvm_domain.ioreq_server.list) )
return NULL;
@@ -2601,6 +2605,15 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
type = (p->type == IOREQ_TYPE_PIO) ?
HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
addr = p->addr;
+        if ( type == HVMOP_IO_RANGE_MEMORY )
+        {
+            /*
+             * Reclassify accesses that hit a write-protected RAM page
+             * (p2m_mmio_write_dm) so that they are matched against the
+             * WP_MEM rangesets below rather than the MMIO ones.
+             */
+            ram_page = get_page_from_gfn(d, PFN_DOWN(p->addr), &p2mt, 0);
+            if ( p2mt == p2m_mmio_write_dm )
+                type = HVMOP_IO_RANGE_WP_MEM;
+
+            if ( ram_page )
+                put_page(ram_page);
+        }
}
list_for_each_entry ( s,
@@ -2642,6 +2655,11 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
}
break;
+ case HVMOP_IO_RANGE_WP_MEM:
+ if ( rangeset_contains_singleton(r, PFN_DOWN(addr)) )
+ return s;
+
+ break;
}
}
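Note that the reclassification in hvm_select_ioreq_server() only fires for pages whose p2m type is already p2m_mmio_write_dm; mapping a range to an IOREQ Server does not itself change any page type. A sketch of the expected caller-side pairing, assuming xc_hvm_set_mem_type() and the HVMMEM_mmio_write_dm memory type are available in this tree (as elsewhere in this series):

    /* Hypothetical snippet: make writes to [gfn, gfn + nr) trap to
     * the IOREQ Server 'id'.
     * 1) flip the pages to the write-protected p2m type;
     * 2) claim the frame range for this server. */
    if ( xc_hvm_set_mem_type(xch, domid, HVMMEM_mmio_write_dm, gfn, nr) )
        return -1;
    if ( xc_hvm_map_wp_mem_range_to_ioreq_server(xch, domid, id,
                                                 gfn, gfn + nr - 1) )
        return -1;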
@@ -48,7 +48,7 @@ struct hvm_ioreq_vcpu {
bool_t pending;
};
-#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
+#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_WP_MEM + 1)
#define MAX_NR_IO_RANGES 256
struct hvm_ioreq_server {
@@ -333,6 +333,7 @@ struct xen_hvm_io_range {
# define HVMOP_IO_RANGE_PORT 0 /* I/O port range */
# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
# define HVMOP_IO_RANGE_PCI 2 /* PCI segment/bus/dev/func range */
+# define HVMOP_IO_RANGE_WP_MEM  3 /* Write-protected RAM range */
uint64_aligned_t start, end; /* IN - inclusive start and end of range */
};
typedef struct xen_hvm_io_range xen_hvm_io_range_t;
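Compatibility note: on a hypervisor without this change, HVMOP_IO_RANGE_WP_MEM falls through to the default case of the range-type switches, so the map call fails and the libxc wrapper returns -1 (errno typically EINVAL). Device models should treat that as "write-protection ranges unsupported" rather than as a fatal error.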