@@ -1974,6 +1974,9 @@ int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t old_gfn,
xen_pfn_t new_gfn);
+int xc_altp2m_set_mem_access_multi(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, uint8_t *access,
+ uint64_t *pages, uint32_t nr);
/**
* Mem paging operations.
@@ -213,3 +213,43 @@ int xc_altp2m_change_gfn(xc_interface *handle, uint32_t domid,
return rc;
}
+int xc_altp2m_set_mem_access_multi(xc_interface *xch, uint32_t domid,
+                                   uint16_t view_id, uint8_t *access,
+                                   uint64_t *pages, uint32_t nr)
+{
+    int rc;
+
+    DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+    DECLARE_HYPERCALL_BOUNCE(access, nr, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(uint64_t),
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+        return -1;
+
+    arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+    arg->cmd = HVMOP_altp2m_set_mem_access_multi;
+    arg->domain = domid;
+    arg->u.set_mem_access_multi.view = view_id;
+    arg->u.set_mem_access_multi.nr = nr;
+
+    if ( xc_hypercall_bounce_pre(xch, pages) ||
+         xc_hypercall_bounce_pre(xch, access) )
+    {
+        PERROR("Could not bounce memory for HVMOP_altp2m_set_mem_access_multi");
+        /* Take the common exit path so @arg (and any bounced buffer) is not leaked. */
+        rc = -1;
+        goto out;
+    }
+    set_xen_guest_handle(arg->u.set_mem_access_multi.pfn_list, pages);
+    set_xen_guest_handle(arg->u.set_mem_access_multi.access_list, access);
+    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+                  HYPERCALL_BUFFER_AS_ARG(arg));
+ out:
+    /* NOTE(review): assumes bounce_post copes with a failed _pre (NULL hbuf) - confirm in xc_private.c. */
+    xc_hypercall_bounce_post(xch, access);
+    xc_hypercall_bounce_post(xch, pages);
+    xc_hypercall_buffer_free(xch, arg);
+    return rc;
+}
@@ -73,6 +73,8 @@
#include <public/arch-x86/cpuid.h>
#include <asm/cpuid.h>
+#include <compat/hvm/hvm_op.h>
+
bool_t __read_mostly hvm_enabled;
#ifdef DBG_LEVEL_0
@@ -4451,6 +4453,7 @@ static int do_altp2m_op(
case HVMOP_altp2m_destroy_p2m:
case HVMOP_altp2m_switch_p2m:
case HVMOP_altp2m_set_mem_access:
+ case HVMOP_altp2m_set_mem_access_multi:
case HVMOP_altp2m_change_gfn:
break;
default:
@@ -4568,6 +4571,37 @@ static int do_altp2m_op(
a.u.set_mem_access.view);
break;
+ case HVMOP_altp2m_set_mem_access_multi:
+ if ( a.u.set_mem_access_multi.pad ||
+ a.u.set_mem_access_multi.opaque >= a.u.set_mem_access_multi.nr )
+ {
+ rc = -EINVAL;
+ break;
+ }
+
+ /*
+ * The mask was set (arbitrary) to 0x3F to match the value used for
+ * MEMOP, despite the fact there are no encoding limitations for the
+ * start parameter.
+ */
+ rc = p2m_set_mem_access_multi(d, a.u.set_mem_access_multi.pfn_list,
+ a.u.set_mem_access_multi.access_list,
+ a.u.set_mem_access_multi.nr,
+ a.u.set_mem_access_multi.opaque,
+ 0x3F,
+ a.u.set_mem_access_multi.view);
+ if ( rc > 0 )
+ {
+ a.u.set_mem_access_multi.opaque = rc;
+ if ( __copy_field_to_guest(guest_handle_cast(arg, xen_hvm_altp2m_op_t),
+ &a, u.set_mem_access_multi.opaque) )
+ rc = -EFAULT;
+ else
+ rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
+ HVMOP_altp2m, arg);
+ }
+ break;
+
case HVMOP_altp2m_change_gfn:
if ( a.u.change_gfn.pad1 || a.u.change_gfn.pad2 )
rc = -EINVAL;
@@ -4586,6 +4620,113 @@ static int do_altp2m_op(
return rc;
}
+DEFINE_XEN_GUEST_HANDLE(compat_hvm_altp2m_op_t);
+
+/*
+ * Manually define the CHECK_ macros for hvm_altp2m_op and
+ * hvm_altp2m_set_mem_access_multi as the generated versions can't handle
+ * correctly the translation of all fields from compat_(*) to xen_(*).
+ */
+#ifndef CHECK_hvm_altp2m_op
+#define CHECK_hvm_altp2m_op \
+    CHECK_SIZE_(struct, hvm_altp2m_op); \
+    CHECK_FIELD_(struct, hvm_altp2m_op, version); \
+    CHECK_FIELD_(struct, hvm_altp2m_op, cmd); \
+    CHECK_FIELD_(struct, hvm_altp2m_op, domain); \
+    CHECK_FIELD_(struct, hvm_altp2m_op, pad1); \
+    CHECK_FIELD_(struct, hvm_altp2m_op, pad2)
+#endif /* CHECK_hvm_altp2m_op */
+
+#ifndef CHECK_hvm_altp2m_set_mem_access_multi
+#define CHECK_hvm_altp2m_set_mem_access_multi \
+    CHECK_FIELD_(struct, hvm_altp2m_set_mem_access_multi, view); \
+    CHECK_FIELD_(struct, hvm_altp2m_set_mem_access_multi, pad); \
+    CHECK_FIELD_(struct, hvm_altp2m_set_mem_access_multi, nr); \
+    CHECK_FIELD_(struct, hvm_altp2m_set_mem_access_multi, opaque)
+#endif /* CHECK_hvm_altp2m_set_mem_access_multi */
+
+CHECK_hvm_altp2m_op;
+CHECK_hvm_altp2m_set_mem_access_multi;
+
+/*
+ * 32-bit compat wrapper for HVMOP_altp2m: only set_mem_access_multi
+ * embeds guest handles and so needs translation into the native layout
+ * at COMPAT_ARG_XLAT_VIRT_BASE; every other sub-op is forwarded to
+ * do_altp2m_op() unmodified (layouts verified by the CHECK_ macros above).
+ */
+static int compat_altp2m_op(
+    XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    int rc = 0;
+    struct compat_hvm_altp2m_op a;
+    union
+    {
+        XEN_GUEST_HANDLE_PARAM(void) hnd;
+        struct xen_hvm_altp2m_op *altp2m_op;
+    } nat;
+
+    if ( !hvm_altp2m_supported() )
+        return -EOPNOTSUPP;
+
+    if ( copy_from_guest(&a, arg, 1) )
+        return -EFAULT;
+
+    if ( a.pad1 || a.pad2 ||
+         (a.version != HVMOP_ALTP2M_INTERFACE_VERSION) )
+        return -EINVAL;
+
+    set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
+
+    switch ( a.cmd )
+    {
+    case HVMOP_altp2m_set_mem_access_multi:
+#define XLAT_hvm_altp2m_set_mem_access_multi_HNDL_pfn_list(_d_, _s_) \
+        guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list)
+#define XLAT_hvm_altp2m_set_mem_access_multi_HNDL_access_list(_d_, _s_) \
+        guest_from_compat_handle((_d_)->access_list, (_s_)->access_list)
+        XLAT_hvm_altp2m_set_mem_access_multi(&nat.altp2m_op->u.set_mem_access_multi,
+                                             &a.u.set_mem_access_multi);
+#undef XLAT_hvm_altp2m_set_mem_access_multi_HNDL_pfn_list
+#undef XLAT_hvm_altp2m_set_mem_access_multi_HNDL_access_list
+        break;
+    default:
+        return do_altp2m_op(arg);
+    }
+
+    /* Manually fill the common part of the xen_hvm_altp2m_op structure. */
+    nat.altp2m_op->version = a.version;
+    nat.altp2m_op->cmd = a.cmd;
+    nat.altp2m_op->domain = a.domain;
+    nat.altp2m_op->pad1 = a.pad1;
+    nat.altp2m_op->pad2 = a.pad2;
+
+    rc = do_altp2m_op(nat.hnd);
+
+    switch ( a.cmd )
+    {
+    case HVMOP_altp2m_set_mem_access_multi:
+        /*
+         * The return code can be positive only if it is the return value
+         * of hypercall_create_continuation. In this case, the opaque value
+         * must be copied back to the guest.
+         */
+        if ( rc > 0 )
+        {
+            ASSERT(rc == __HYPERVISOR_hvm_op);
+            a.u.set_mem_access_multi.opaque =
+                nat.altp2m_op->u.set_mem_access_multi.opaque;
+            if ( __copy_field_to_guest(guest_handle_cast(arg,
+                                                         compat_hvm_altp2m_op_t),
+                                       &a, u.set_mem_access_multi.opaque) )
+                rc = -EFAULT;
+        }
+        break;
+    default:
+        ASSERT_UNREACHABLE();
+    }
+
+    return rc;
+}
+
static int hvmop_get_mem_type(
XEN_GUEST_HANDLE_PARAM(xen_hvm_get_mem_type_t) arg)
{
@@ -4733,7 +4869,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
case HVMOP_altp2m:
- rc = do_altp2m_op(arg);
+ rc = current->hcall_compat ? compat_altp2m_op(arg) : do_altp2m_op(arg);
break;
default:
@@ -28,6 +28,7 @@ headers-$(CONFIG_X86) += compat/arch-x86/xen.h
headers-$(CONFIG_X86) += compat/arch-x86/xen-$(compat-arch-y).h
headers-$(CONFIG_X86) += compat/hvm/hvm_vcpu.h
headers-$(CONFIG_X86) += compat/hvm/dm_op.h
+headers-$(CONFIG_X86) += compat/hvm/hvm_op.h
headers-y += compat/arch-$(compat-arch-y).h compat/pmu.h compat/xlat.h
headers-$(CONFIG_FLASK) += compat/xsm/flask_op.h
@@ -83,6 +83,13 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
/* Flushes all VCPU TLBs: @arg must be NULL. */
#define HVMOP_flush_tlbs 5
+/*
+ * hvmmem_type_t should not be defined when generating the corresponding
+ * compat header. This will ensure that the improperly named HVMMEM_(*)
+ * values are defined only once.
+ */
+#ifndef XEN_GENERATING_COMPAT_HEADERS
+
typedef enum {
HVMMEM_ram_rw, /* Normal read/write guest RAM */
HVMMEM_ram_ro, /* Read-only; writes are discarded */
@@ -102,6 +109,8 @@ typedef enum {
to HVMMEM_ram_rw. */
} hvmmem_type_t;
+#endif /* XEN_GENERATING_COMPAT_HEADERS */
+
/* Hint from PV drivers for pagetable destruction. */
#define HVMOP_pagetable_dying 9
struct xen_hvm_pagetable_dying {
@@ -237,6 +246,20 @@ struct xen_hvm_altp2m_set_mem_access {
typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);
+struct xen_hvm_altp2m_set_mem_access_multi {
+    /* ID of the altp2m view to set the access rights in. */
+    uint16_t view;
+    uint16_t pad;        /* Must be 0; the hypervisor rejects a nonzero pad. */
+    /* Number of entries in pfn_list and access_list. */
+    uint32_t nr;
+    /* Hypercall-continuation cookie; set to 0 on the first invocation. */
+    uint64_t opaque;
+    /* List of pfns to set access for */
+    XEN_GUEST_HANDLE(const_uint64) pfn_list;
+    /* Corresponding list of access settings for pfn_list */
+    XEN_GUEST_HANDLE(const_uint8) access_list;
+};
+
struct xen_hvm_altp2m_change_gfn {
/* view */
uint16_t view;
@@ -268,15 +291,18 @@ struct xen_hvm_altp2m_op {
#define HVMOP_altp2m_set_mem_access 7
/* Change a p2m entry to have a different gfn->mfn mapping */
#define HVMOP_altp2m_change_gfn 8
+/* Set access for an array of pages */
+#define HVMOP_altp2m_set_mem_access_multi 9
domid_t domain;
uint16_t pad1;
uint32_t pad2;
union {
- struct xen_hvm_altp2m_domain_state domain_state;
- struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
- struct xen_hvm_altp2m_view view;
- struct xen_hvm_altp2m_set_mem_access set_mem_access;
- struct xen_hvm_altp2m_change_gfn change_gfn;
+ struct xen_hvm_altp2m_domain_state domain_state;
+ struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
+ struct xen_hvm_altp2m_view view;
+ struct xen_hvm_altp2m_set_mem_access set_mem_access;
+ struct xen_hvm_altp2m_change_gfn change_gfn;
+ struct xen_hvm_altp2m_set_mem_access_multi set_mem_access_multi;
uint8_t pad[64];
} u;
};
@@ -71,6 +71,7 @@
? dm_op_set_pci_intx_level hvm/dm_op.h
? dm_op_set_pci_link_route hvm/dm_op.h
? dm_op_track_dirty_vram hvm/dm_op.h
+! hvm_altp2m_set_mem_access_multi hvm/hvm_op.h
? vcpu_hvm_context hvm/hvm_vcpu.h
? vcpu_hvm_x86_32 hvm/hvm_vcpu.h
? vcpu_hvm_x86_64 hvm/hvm_vcpu.h