@@ -1923,6 +1923,10 @@ int xc_altp2m_switch_to_view(xc_interface *handle, uint32_t domid,
uint16_t view_id);
int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t gfn, bool sve);
+int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, xen_pfn_t first_gfn,
+ xen_pfn_t last_gfn, bool sve,
+ xen_pfn_t *error_gfn, int32_t *error_code);
int xc_altp2m_get_suppress_ve(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t gfn, bool *sve);
int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
@@ -234,6 +234,39 @@ int xc_altp2m_set_suppress_ve(xc_interface *handle, uint32_t domid,
return rc;
}
+int xc_altp2m_set_supress_ve_multi(xc_interface *handle, uint32_t domid,
+ uint16_t view_id, xen_pfn_t first_gfn,
+ xen_pfn_t last_gfn, bool sve,
+ xen_pfn_t *error_gfn, int32_t *error_code)
+{
+ int rc;
+ DECLARE_HYPERCALL_BUFFER(xen_hvm_altp2m_op_t, arg);
+
+ arg = xc_hypercall_buffer_alloc(handle, arg, sizeof(*arg));
+ if ( arg == NULL )
+ return -1;
+
+ arg->version = HVMOP_ALTP2M_INTERFACE_VERSION;
+ arg->cmd = HVMOP_altp2m_set_suppress_ve_multi;
+ arg->domain = domid;
+ arg->u.suppress_ve_multi.view = view_id;
+ arg->u.suppress_ve_multi.first_gfn = first_gfn;
+ arg->u.suppress_ve_multi.last_gfn = last_gfn;
+ arg->u.suppress_ve_multi.suppress_ve = sve;
+
+ rc = xencall2(handle->xcall, __HYPERVISOR_hvm_op, HVMOP_altp2m,
+ HYPERCALL_BUFFER_AS_ARG(arg));
+
+ if ( arg->u.suppress_ve_multi.first_error_code )
+ {
+ *error_gfn = arg->u.suppress_ve_multi.first_error;
+ *error_code = arg->u.suppress_ve_multi.first_error_code;
+ }
+
+ xc_hypercall_buffer_free(handle, arg);
+ return rc;
+}
+
int xc_altp2m_set_mem_access(xc_interface *handle, uint32_t domid,
uint16_t view_id, xen_pfn_t gfn,
xenmem_access_t access)
@@ -4553,6 +4553,7 @@ static int do_altp2m_op(
case HVMOP_altp2m_destroy_p2m:
case HVMOP_altp2m_switch_p2m:
case HVMOP_altp2m_set_suppress_ve:
+ case HVMOP_altp2m_set_suppress_ve_multi:
case HVMOP_altp2m_get_suppress_ve:
case HVMOP_altp2m_set_mem_access:
case HVMOP_altp2m_set_mem_access_multi:
@@ -4711,6 +4712,25 @@ static int do_altp2m_op(
}
break;
+ case HVMOP_altp2m_set_suppress_ve_multi:
+ {
+ uint64_t max_phys_addr = (1UL << d->arch.cpuid->extd.maxphysaddr) - 1;
+
+ a.u.suppress_ve_multi.last_gfn = min(a.u.suppress_ve_multi.last_gfn,
+ max_phys_addr);
+
+ if ( a.u.suppress_ve_multi.pad1 ||
+ a.u.suppress_ve_multi.first_gfn > a.u.suppress_ve_multi.last_gfn )
+ rc = -EINVAL;
+ else
+ {
+ rc = p2m_set_suppress_ve_multi(d, &a.u.suppress_ve_multi);
+ if ( (!rc || rc == -ERESTART) && __copy_to_guest(arg, &a, 1) )
+ rc = -EFAULT;
+ }
+ break;
+ }
+
case HVMOP_altp2m_get_suppress_ve:
if ( a.u.suppress_ve.pad1 || a.u.suppress_ve.pad2 )
rc = -EINVAL;
@@ -3065,6 +3065,70 @@ out:
return rc;
}
+/*
+ * Set/clear the #VE suppress bit for multiple pages. Only available on VMX.
+ */
+int p2m_set_suppress_ve_multi(struct domain *d,
+ struct xen_hvm_altp2m_suppress_ve_multi *sve)
+{
+ struct p2m_domain *host_p2m = p2m_get_hostp2m(d);
+ struct p2m_domain *ap2m = NULL;
+ struct p2m_domain *p2m = host_p2m;
+ uint64_t start = sve->first_gfn;
+ int rc = 0;
+
+ if ( sve->view > 0 )
+ {
+ if ( sve->view >= MAX_ALTP2M ||
+ d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_ALTP2M)] ==
+ mfn_x(INVALID_MFN) )
+ return -EINVAL;
+
+ p2m = ap2m = d->arch.altp2m_p2m[array_index_nospec(sve->view,
+ MAX_ALTP2M)];
+ }
+
+ p2m_lock(host_p2m);
+
+ if ( ap2m )
+ p2m_lock(ap2m);
+
+ while ( sve->last_gfn >= start )
+ {
+ p2m_access_t a;
+ p2m_type_t t;
+ mfn_t mfn;
+ int err = 0;
+
+ if ( altp2m_get_effective_entry(p2m, _gfn(start), &mfn, &t, &a, AP2MGET_query) )
+ a = p2m->default_access;
+
+ if ( (err = p2m->set_entry(p2m, _gfn(start), mfn, PAGE_ORDER_4K, t, a,
+ sve->suppress_ve)) &&
+ !sve->first_error_code )
+ {
+ sve->first_error = start; /* Save the gfn of the first error */
+ sve->first_error_code = err; /* Save the first error code */
+ }
+
+ /* Check for continuation if it's not the last iteration. */
+ if ( sve->last_gfn >= ++start && hypercall_preempt_check() )
+ {
+ rc = -ERESTART;
+ break;
+ }
+ }
+
+ sve->first_gfn = start;
+
+ if ( ap2m )
+ p2m_unlock(ap2m);
+
+ p2m_unlock(host_p2m);
+
+ return rc;
+}
+
int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
unsigned int altp2m_idx)
{
@@ -46,6 +46,16 @@ struct xen_hvm_altp2m_suppress_ve {
uint64_t gfn;
};
+struct xen_hvm_altp2m_suppress_ve_multi {
+ uint16_t view;
+ uint8_t suppress_ve; /* Boolean type. */
+ uint8_t pad1;
+ int32_t first_error_code; /* Must be set to 0. */
+ uint64_t first_gfn; /* Value will be updated. */
+ uint64_t last_gfn;
+ uint64_t first_error; /* Gfn of the first error. Must be set to 0. */
+};
+
#if __XEN_INTERFACE_VERSION__ < 0x00040900
/* Set the logical level of one of a domain's PCI INTx wires. */
@@ -339,6 +349,8 @@ struct xen_hvm_altp2m_op {
#define HVMOP_altp2m_vcpu_disable_notify 13
/* Get the active vcpu p2m index */
#define HVMOP_altp2m_get_p2m_idx 14
+/* Set the "Supress #VE" bit for a range of pages */
+#define HVMOP_altp2m_set_suppress_ve_multi 15
domid_t domain;
uint16_t pad1;
uint32_t pad2;
@@ -353,6 +365,7 @@ struct xen_hvm_altp2m_op {
struct xen_hvm_altp2m_change_gfn change_gfn;
struct xen_hvm_altp2m_set_mem_access_multi set_mem_access_multi;
struct xen_hvm_altp2m_suppress_ve suppress_ve;
+ struct xen_hvm_altp2m_suppress_ve_multi suppress_ve_multi;
struct xen_hvm_altp2m_vcpu_disable_notify disable_notify;
struct xen_hvm_altp2m_get_vcpu_p2m_idx get_vcpu_p2m_idx;
uint8_t pad[64];
@@ -75,6 +75,9 @@ long p2m_set_mem_access_multi(struct domain *d,
int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve,
unsigned int altp2m_idx);
+int p2m_set_suppress_ve_multi(struct domain *d,
+ struct xen_hvm_altp2m_suppress_ve_multi *suppress_ve);
+
int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
unsigned int altp2m_idx);
By default the sve bits are not set.

This patch adds a new hypercall, xc_altp2m_set_supress_ve_multi(), to
set a range of sve bits. The core function, p2m_set_suppress_ve_multi(),
does not break in case of an error and instead makes a best effort to
set the bits in the given range. A continuation check is made in order
to allow preemption on large ranges.

The gfn of the first error is stored in
xen_hvm_altp2m_suppress_ve_multi.first_error and the error code is
stored in xen_hvm_altp2m_suppress_ve_multi.first_error_code. If no
error occurred, the values will be 0.

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
CC: Ian Jackson <ian.jackson@eu.citrix.com>
CC: Wei Liu <wl@xen.org>
CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: George Dunlap <George.Dunlap@eu.citrix.com>
CC: Jan Beulich <jbeulich@suse.com>
CC: Julien Grall <julien@xen.org>
CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: "Roger Pau Monné" <roger.pau@citrix.com>
CC: George Dunlap <george.dunlap@eu.citrix.com>
CC: Razvan Cojocaru <rcojocaru@bitdefender.com>
CC: Tamas K Lengyel <tamas@tklengyel.com>
CC: Petre Pircalabu <ppircalabu@bitdefender.com>
---
Changes since V4:
	- Remove ->first_error and first_error_code from
	  HVMOP_altp2m_set_suppress_ve_multi check
	- Check ->first_error_code so that first_error on gfn 0 can be
	  saved
	- Change type of first_error_code to int32_t
	- Clip ->last_gfn before sanity check.
---
 tools/libxc/include/xenctrl.h   |  4 +++
 tools/libxc/xc_altp2m.c         | 33 +++++++++++++++++
 xen/arch/x86/hvm/hvm.c          | 20 +++++++++++
 xen/arch/x86/mm/p2m.c           | 64 +++++++++++++++++++++++++++++++++
 xen/include/public/hvm/hvm_op.h | 13 +++++++
 xen/include/xen/mem_access.h    |  3 ++
 6 files changed, 137 insertions(+)
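
For reviewers, a minimal caller-side sketch of the new libxc wrapper
follows. It is illustrative only and not part of the patch: the domain
id, altp2m view id and gfn range are made up, and it assumes an HVM
guest with altp2m enabled and the view already created.

/* Illustrative only, not part of the patch: clear the suppress-#VE bit
 * for gfns 0x1000-0x1fff in (hypothetical) altp2m view 1 of domain 5
 * and report the first failing gfn, if any. */
#include <stdbool.h>
#include <stdio.h>
#include <xenctrl.h>

int main(void)
{
    uint32_t domid = 5;       /* assumed existing HVM guest */
    uint16_t view_id = 1;     /* assumed existing altp2m view */
    xen_pfn_t error_gfn = 0;
    int32_t error_code = 0;
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if ( !xch )
        return 1;

    rc = xc_altp2m_set_supress_ve_multi(xch, domid, view_id,
                                        0x1000, 0x1fff, false,
                                        &error_gfn, &error_code);
    if ( rc < 0 )
        fprintf(stderr, "hypercall failed: rc=%d\n", rc);
    else if ( error_code )
        fprintf(stderr, "first failure at gfn %#lx: %d\n",
                (unsigned long)error_gfn, error_code);

    xc_interface_close(xch);

    return rc < 0;
}

Note that error_gfn/error_code are only written back when the
hypervisor reported a per-gfn error, so the caller initializes both
to 0 before the call.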