@@ -1993,6 +1993,7 @@ int xc_get_mem_access(xc_interface *xch, uint32_t domain_id,
* Returns the VM_EVENT_INTERFACE version.
*/
int xc_vm_event_get_version(xc_interface *xch);
+int xc_vm_event_ng_get_version(xc_interface *xch);
/***
* Monitor control operations.
@@ -2007,6 +2008,11 @@ int xc_vm_event_get_version(xc_interface *xch);
void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port);
int xc_monitor_disable(xc_interface *xch, uint32_t domain_id);
int xc_monitor_resume(xc_interface *xch, uint32_t domain_id);
+
+/* Monitor NG interface */
+int xc_monitor_ng_create(xc_interface *xch, uint32_t domain_id);
+int xc_monitor_ng_destroy(xc_interface *xch, uint32_t domain_id);
+int xc_monitor_ng_set_state(xc_interface *xch, uint32_t domain_id, bool enabled);
/*
* Get a bitmap of supported monitor events in the form
* (1 << XEN_DOMCTL_MONITOR_EVENT_*).
@@ -265,6 +265,21 @@ int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
return do_domctl(xch, &domctl);
}
+int xc_monitor_ng_create(xc_interface *xch, uint32_t domain_id)
+{
+ return xc_vm_event_ng_create(xch, domain_id, XEN_VM_EVENT_TYPE_MONITOR);
+}
+
+int xc_monitor_ng_destroy(xc_interface *xch, uint32_t domain_id)
+{
+ return xc_vm_event_ng_destroy(xch, domain_id, XEN_VM_EVENT_TYPE_MONITOR);
+}
+
+int xc_monitor_ng_set_state(xc_interface *xch, uint32_t domain_id, bool enabled)
+{
+ return xc_vm_event_ng_set_state(xch, domain_id, XEN_VM_EVENT_TYPE_MONITOR, enabled);
+}
+
/*
* Local variables:
* mode: C
@@ -420,6 +420,14 @@ int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int type,
uint32_t *port);
+/**
+ * VM_EVENT NG operations. Internal use only.
+ */
+int xc_vm_event_ng_create(xc_interface *xch, uint32_t domain_id, int type);
+int xc_vm_event_ng_destroy(xc_interface *xch, uint32_t domain_id, int type);
+int xc_vm_event_ng_set_state(xc_interface *xch, uint32_t domain_id, int type, bool enabled);
+
+
int do_dm_op(xc_interface *xch, uint32_t domid, unsigned int nr_bufs, ...);
#endif /* __XC_PRIVATE_H__ */
@@ -154,6 +154,59 @@ int xc_vm_event_get_version(xc_interface *xch)
return rc;
}
+int xc_vm_event_ng_get_version(xc_interface *xch)
+{
+ DECLARE_DOMCTL;
+ int rc;
+
+ domctl.cmd = XEN_DOMCTL_vm_event_ng_op;
+ domctl.domain = DOMID_INVALID;
+    domctl.u.vm_event_ng_op.op = XEN_VM_EVENT_NG_GET_VERSION;
+    domctl.u.vm_event_ng_op.type = XEN_VM_EVENT_TYPE_MONITOR;
+
+ rc = do_domctl(xch, &domctl);
+ if ( !rc )
+ rc = domctl.u.vm_event_ng_op.u.version;
+ return rc;
+}
+
+int xc_vm_event_ng_create(xc_interface *xch, uint32_t domain_id, int type)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_vm_event_ng_op;
+ domctl.domain = domain_id;
+ domctl.u.vm_event_ng_op.op = XEN_VM_EVENT_NG_CREATE;
+ domctl.u.vm_event_ng_op.type = type;
+
+ return do_domctl(xch, &domctl);
+}
+
+int xc_vm_event_ng_destroy(xc_interface *xch, uint32_t domain_id, int type)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_vm_event_ng_op;
+ domctl.domain = domain_id;
+ domctl.u.vm_event_ng_op.op = XEN_VM_EVENT_NG_DESTROY;
+ domctl.u.vm_event_ng_op.type = type;
+
+ return do_domctl(xch, &domctl);
+}
+
+int xc_vm_event_ng_set_state(xc_interface *xch, uint32_t domain_id, int type, bool enabled)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_vm_event_ng_op;
+ domctl.domain = domain_id;
+ domctl.u.vm_event_ng_op.op = XEN_VM_EVENT_NG_SET_STATE;
+ domctl.u.vm_event_ng_op.type = type;
+ domctl.u.vm_event_ng_op.u.enabled = enabled;
+
+ return do_domctl(xch, &domctl);
+}
+
/*
* Local variables:
* mode: C
@@ -119,6 +119,7 @@
#include <xen/efi.h>
#include <xen/grant_table.h>
#include <xen/hypercall.h>
+#include <xen/vm_event.h>
#include <asm/paging.h>
#include <asm/shadow.h>
#include <asm/page.h>
@@ -4584,6 +4585,10 @@ int arch_acquire_resource(struct domain *d, unsigned int type,
}
#endif
+ case XENMEM_resource_vm_event:
+ rc = vm_event_ng_get_frames(d, id, frame, nr_frames, mfn_list);
+ break;
+
default:
rc = -EOPNOTSUPP;
break;
@@ -59,6 +59,7 @@ obj-y += trace.o
obj-y += version.o
obj-y += virtual_region.o
obj-y += vm_event.o
+obj-y += vm_event_ng.o
obj-y += vmap.o
obj-y += vsprintf.o
obj-y += wait.o
@@ -393,6 +393,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
case XEN_DOMCTL_test_assign_device:
case XEN_DOMCTL_vm_event_op:
+ case XEN_DOMCTL_vm_event_ng_op:
if ( op->domain == DOMID_INVALID )
{
case XEN_DOMCTL_createdomain:
@@ -1023,6 +1024,12 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
copyback = 1;
break;
+ case XEN_DOMCTL_vm_event_ng_op:
+ ret = vm_event_ng_domctl(d, &op->u.vm_event_ng_op,
+ guest_handle_cast(u_domctl, void));
+ copyback = 1;
+ break;
+
#ifdef CONFIG_MEM_ACCESS
case XEN_DOMCTL_set_access_required:
if ( unlikely(current->domain == d) ) /* no domain_pause() */
@@ -380,6 +380,51 @@ static int vm_event_ring_get_response(struct vm_event_ring_domain *impl,
return 1;
}
+void vm_event_handle_response(struct domain *d, struct vcpu *v,
+ vm_event_response_t *rsp)
+{
+ /* Check flags which apply only when the vCPU is paused */
+ if ( atomic_read(&v->vm_event_pause_count) )
+ {
+#ifdef CONFIG_HAS_MEM_PAGING
+ if ( rsp->reason == VM_EVENT_REASON_MEM_PAGING )
+ p2m_mem_paging_resume(d, rsp);
+#endif
+
+ /*
+ * Check emulation flags in the arch-specific handler only, as it
+ * has to set arch-specific flags when supported, and to avoid
+ * bitmask overhead when it isn't supported.
+ */
+ vm_event_emulate_check(v, rsp);
+
+ /*
+ * Check in arch-specific handler to avoid bitmask overhead when
+ * not supported.
+ */
+ vm_event_register_write_resume(v, rsp);
+
+ /*
+ * Check in arch-specific handler to avoid bitmask overhead when
+ * not supported.
+ */
+ vm_event_toggle_singlestep(d, v, rsp);
+
+ /* Check for altp2m switch */
+ if ( rsp->flags & VM_EVENT_FLAG_ALTERNATE_P2M )
+ p2m_altp2m_check(v, rsp->altp2m_idx);
+
+ if ( rsp->flags & VM_EVENT_FLAG_SET_REGISTERS )
+ vm_event_set_registers(v, rsp);
+
+ if ( rsp->flags & VM_EVENT_FLAG_GET_NEXT_INTERRUPT )
+ vm_event_monitor_next_interrupt(v);
+
+ if ( rsp->flags & VM_EVENT_FLAG_VCPU_PAUSED )
+ vm_event_vcpu_unpause(v);
+ }
+}
+
/*
* Pull all responses from the given ring and unpause the corresponding vCPU
* if required. Based on the response type, here we can also call custom
@@ -427,47 +472,7 @@ static int vm_event_ring_resume(struct vm_event_ring_domain *impl)
* In some cases the response type needs extra handling, so here
* we call the appropriate handlers.
*/
-
- /* Check flags which apply only when the vCPU is paused */
- if ( atomic_read(&v->vm_event_pause_count) )
- {
-#ifdef CONFIG_HAS_MEM_PAGING
- if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
- p2m_mem_paging_resume(impl->ved.d, &rsp);
-#endif
-
- /*
- * Check emulation flags in the arch-specific handler only, as it
- * has to set arch-specific flags when supported, and to avoid
- * bitmask overhead when it isn't supported.
- */
- vm_event_emulate_check(v, &rsp);
-
- /*
- * Check in arch-specific handler to avoid bitmask overhead when
- * not supported.
- */
- vm_event_register_write_resume(v, &rsp);
-
- /*
- * Check in arch-specific handler to avoid bitmask overhead when
- * not supported.
- */
- vm_event_toggle_singlestep(impl->ved.d, v, &rsp);
-
- /* Check for altp2m switch */
- if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
- p2m_altp2m_check(v, rsp.altp2m_idx);
-
- if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
- vm_event_set_registers(v, &rsp);
-
- if ( rsp.flags & VM_EVENT_FLAG_GET_NEXT_INTERRUPT )
- vm_event_monitor_next_interrupt(v);
-
- if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
- vm_event_vcpu_unpause(v);
- }
+ vm_event_handle_response(impl->ved.d, v, &rsp);
}
return 0;
@@ -709,9 +714,10 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
rc = arch_monitor_init_domain(d);
if ( rc )
break;
- rc = vm_event_ring_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
- HVM_PARAM_MONITOR_RING_PFN,
- monitor_notification);
+ rc = vm_event_ring_enable(d, vec, &d->vm_event_monitor,
+ _VPF_mem_access,
+ HVM_PARAM_MONITOR_RING_PFN,
+ monitor_notification);
break;
case XEN_VM_EVENT_DISABLE:
new file mode 100644
@@ -0,0 +1,456 @@
+/******************************************************************************
+ * vm_event_ng.c
+ *
+ * VM event support (new generation).
+ *
+ * Copyright (c) 2019, Bitdefender S.R.L.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/sched.h>
+#include <xen/event.h>
+#include <xen/vm_event.h>
+#include <xen/vmap.h>
+#include <asm/monitor.h>
+#include <asm/vm_event.h>
+#include <xsm/xsm.h>
+
+#define to_channels(_ved) container_of((_ved), \
+ struct vm_event_channels_domain, ved)
+
+#define VM_EVENT_CHANNELS_ENABLED 1
+
+struct vm_event_channels_domain
+{
+ /* VM event domain */
+ struct vm_event_domain ved;
+ /* shared channels buffer */
+ struct vm_event_slot *slots;
+ /* the buffer size (number of frames) */
+ unsigned int nr_frames;
+ /* state */
+ bool enabled;
+    /* buffer's mfn list */
+ mfn_t mfn[0];
+};
+
+static const struct vm_event_ops vm_event_channels_ops;
+
+static int vm_event_channels_alloc_buffer(struct vm_event_channels_domain *impl)
+{
+ int i, rc = -ENOMEM;
+
+ for ( i = 0; i < impl->nr_frames; i++ )
+ {
+ struct page_info *page = alloc_domheap_page(impl->ved.d, 0);
+ if ( !page )
+ goto err;
+
+        if ( !get_page_and_type(page, impl->ved.d, PGT_writable_page) )
+        {
+            /* Drop the allocation ref the page received in assign_pages(). */
+            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+                put_page(page);
+            rc = -ENODATA;
+            goto err;
+        }
+
+ impl->mfn[i] = page_to_mfn(page);
+ }
+
+ impl->slots = (struct vm_event_slot *)vmap(impl->mfn, impl->nr_frames);
+ if ( !impl->slots )
+ goto err;
+
+ for ( i = 0; i < impl->nr_frames; i++ )
+        clear_page((void *)impl->slots + i * PAGE_SIZE);
+
+ return 0;
+
+err:
+ while ( --i >= 0 )
+ {
+ struct page_info *page = mfn_to_page(impl->mfn[i]);
+
+ if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+ put_page(page);
+ put_page_and_type(page);
+ }
+
+ return rc;
+}
+
+static void vm_event_channels_free_buffer(struct vm_event_channels_domain *impl)
+{
+ int i;
+
+ ASSERT(impl);
+
+ if ( !impl->slots )
+ return;
+
+ vunmap(impl->slots);
+
+ for ( i = 0; i < impl->nr_frames; i++ )
+ {
+ struct page_info *page = mfn_to_page(impl->mfn[i]);
+
+ ASSERT(page);
+ if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+ put_page(page);
+ put_page_and_type(page);
+ }
+}
+
+static int vm_event_channels_create(
+ struct domain *d,
+ struct xen_domctl_vm_event_ng_op *vec,
+ struct vm_event_domain **_ved,
+ int pause_flag,
+ xen_event_channel_notification_t notification_fn)
+{
+    int rc, i = 0;
+ unsigned int nr_frames = PFN_UP(d->max_vcpus * sizeof(struct vm_event_slot));
+ struct vm_event_channels_domain *impl;
+
+ if ( *_ved )
+ return -EBUSY;
+
+ impl = _xzalloc(sizeof(struct vm_event_channels_domain) +
+ nr_frames * sizeof(mfn_t),
+ __alignof__(struct vm_event_channels_domain));
+ if ( unlikely(!impl) )
+ return -ENOMEM;
+
+ spin_lock_init(&impl->ved.lock);
+ spin_lock(&impl->ved.lock);
+
+ impl->nr_frames = nr_frames;
+ impl->ved.d = d;
+ impl->ved.ops = &vm_event_channels_ops;
+
+ rc = vm_event_init_domain(d);
+ if ( rc < 0 )
+ goto err;
+
+ rc = vm_event_channels_alloc_buffer(impl);
+ if ( rc )
+ goto err;
+
+ for ( i = 0; i < d->max_vcpus; i++ )
+ {
+ rc = alloc_unbound_xen_event_channel(d, i, current->domain->domain_id,
+ notification_fn);
+ if ( rc < 0 )
+ goto err;
+
+ impl->slots[i].port = rc;
+ impl->slots[i].state = STATE_VM_EVENT_SLOT_IDLE;
+ }
+
+ impl->enabled = false;
+
+ spin_unlock(&impl->ved.lock);
+ *_ved = &impl->ved;
+ return 0;
+
+err:
+    /* Close any event channels bound so far and release the slot buffer. */
+    while ( --i >= 0 )
+        evtchn_close(d, impl->slots[i].port, 0);
+    vm_event_channels_free_buffer(impl);
+    spin_unlock(&impl->ved.lock);
+    XFREE(impl);
+    return rc;
+}
+
+static int vm_event_channels_destroy(struct vm_event_domain **_ved)
+{
+ struct vcpu *v;
+ struct vm_event_channels_domain *impl = to_channels(*_ved);
+ int i;
+
+ spin_lock(&(*_ved)->lock);
+
+ for_each_vcpu( (*_ved)->d, v )
+ {
+ if ( atomic_read(&v->vm_event_pause_count) )
+ vm_event_vcpu_unpause(v);
+ }
+
+ for ( i = 0; i < (*_ved)->d->max_vcpus; i++ )
+ evtchn_close((*_ved)->d, impl->slots[i].port, 0);
+
+ vm_event_channels_free_buffer(impl);
+ spin_unlock(&(*_ved)->lock);
+ XFREE(*_ved);
+
+ return 0;
+}
+
+static bool vm_event_channels_check(struct vm_event_domain *ved)
+{
+ return to_channels(ved)->slots != NULL;
+}
+
+static void vm_event_channels_cleanup(struct vm_event_domain **_ved)
+{
+ vm_event_channels_destroy(_ved);
+}
+
+static int vm_event_channels_claim_slot(struct vm_event_domain *ved,
+ bool allow_sleep)
+{
+ return 0;
+}
+
+static void vm_event_channels_cancel_slot(struct vm_event_domain *ved)
+{
+}
+
+static void vm_event_channels_put_request(struct vm_event_domain *ved,
+ vm_event_request_t *req)
+{
+ struct vm_event_channels_domain *impl = to_channels(ved);
+ struct vm_event_slot *slot;
+
+    /* Exit early if this vm_event_domain was not explicitly enabled. */
+ if ( !impl->enabled )
+ return;
+
+    ASSERT(req->vcpu_id < ved->d->max_vcpus);
+
+ slot = &impl->slots[req->vcpu_id];
+
+ if ( current->domain != ved->d )
+ {
+ req->flags |= VM_EVENT_FLAG_FOREIGN;
+#ifndef NDEBUG
+ if ( !(req->flags & VM_EVENT_FLAG_VCPU_PAUSED) )
+ gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n",
+ ved->d->domain_id, req->vcpu_id);
+#endif
+ }
+
+ req->version = VM_EVENT_INTERFACE_VERSION;
+
+ spin_lock(&impl->ved.lock);
+ if ( slot->state != STATE_VM_EVENT_SLOT_IDLE )
+ {
+ gdprintk(XENLOG_G_WARNING, "The VM event slot for d%dv%d is not IDLE.\n",
+ impl->ved.d->domain_id, req->vcpu_id);
+ spin_unlock(&impl->ved.lock);
+ return;
+ }
+
+ slot->u.req = *req;
+ slot->state = STATE_VM_EVENT_SLOT_SUBMIT;
+ spin_unlock(&impl->ved.lock);
+ notify_via_xen_event_channel(impl->ved.d, slot->port);
+}
+
+static int vm_event_channels_get_response(struct vm_event_channels_domain *impl,
+ struct vcpu *v, vm_event_response_t *rsp)
+{
+ struct vm_event_slot *slot = &impl->slots[v->vcpu_id];
+
+    ASSERT(impl->slots != NULL);
+ spin_lock(&impl->ved.lock);
+
+ if ( slot->state != STATE_VM_EVENT_SLOT_FINISH )
+ {
+ gdprintk(XENLOG_G_WARNING, "The VM event slot state for d%dv%d is invalid.\n",
+ impl->ved.d->domain_id, v->vcpu_id);
+ spin_unlock(&impl->ved.lock);
+ return -1;
+ }
+
+ *rsp = slot->u.rsp;
+ slot->state = STATE_VM_EVENT_SLOT_IDLE;
+
+ spin_unlock(&impl->ved.lock);
+ return 0;
+}
+
+static int vm_event_channels_resume(struct vm_event_channels_domain *impl,
+ struct vcpu *v)
+{
+ vm_event_response_t rsp;
+
+ if ( unlikely(!impl || !vm_event_check(&impl->ved)) )
+ return -ENODEV;
+
+ ASSERT(impl->ved.d != current->domain);
+
+ if ( vm_event_channels_get_response(impl, v, &rsp) ||
+ rsp.version != VM_EVENT_INTERFACE_VERSION ||
+ rsp.vcpu_id != v->vcpu_id )
+ return -1;
+
+ vm_event_handle_response(impl->ved.d, v, &rsp);
+
+ return 0;
+}
+
+/* Registered with Xen-bound event channel for incoming notifications. */
+static void monitor_notification(struct vcpu *v, unsigned int port)
+{
+ vm_event_channels_resume(to_channels(v->domain->vm_event_monitor), v);
+}
+
+int vm_event_ng_domctl(struct domain *d, struct xen_domctl_vm_event_ng_op *vec,
+ XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+{
+ int rc;
+
+ if ( vec->op == XEN_VM_EVENT_NG_GET_VERSION )
+ {
+ vec->u.version = VM_EVENT_INTERFACE_VERSION;
+ return 0;
+ }
+
+ if ( unlikely(d == NULL) )
+ return -ESRCH;
+
+ rc = xsm_vm_event_control(XSM_PRIV, d, vec->type, vec->op);
+ if ( rc )
+ return rc;
+
+ if ( unlikely(d == current->domain) ) /* no domain_pause() */
+ {
+ gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
+ return -EINVAL;
+ }
+
+ if ( unlikely(d->is_dying) )
+ {
+ gdprintk(XENLOG_INFO, "Ignoring memory event op on dying domain %u\n",
+ d->domain_id);
+ return 0;
+ }
+
+ if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
+ {
+ gdprintk(XENLOG_INFO,
+ "Memory event op on a domain (%u) with no vcpus\n",
+ d->domain_id);
+ return -EINVAL;
+ }
+
+ switch ( vec->type )
+ {
+ case XEN_VM_EVENT_TYPE_MONITOR:
+ {
+ rc = -EINVAL;
+
+        switch ( vec->op )
+ {
+ case XEN_VM_EVENT_NG_CREATE:
+ /* domain_pause() not required here, see XSA-99 */
+ rc = arch_monitor_init_domain(d);
+ if ( rc )
+ break;
+ rc = vm_event_channels_create(d, vec, &d->vm_event_monitor,
+ _VPF_mem_access, monitor_notification);
+ break;
+
+ case XEN_VM_EVENT_NG_DESTROY:
+ if ( !vm_event_check(d->vm_event_monitor) )
+ break;
+ domain_pause(d);
+ rc = vm_event_channels_destroy(&d->vm_event_monitor);
+ arch_monitor_cleanup_domain(d);
+ domain_unpause(d);
+ break;
+
+ case XEN_VM_EVENT_NG_SET_STATE:
+ if ( !vm_event_check(d->vm_event_monitor) )
+ break;
+ domain_pause(d);
+ to_channels(d->vm_event_monitor)->enabled = !!vec->u.enabled;
+ domain_unpause(d);
+ rc = 0;
+ break;
+
+ default:
+ rc = -ENOSYS;
+ }
+ break;
+ }
+
+#ifdef CONFIG_HAS_MEM_PAGING
+ case XEN_VM_EVENT_TYPE_PAGING:
+#endif
+
+#ifdef CONFIG_HAS_MEM_SHARING
+ case XEN_VM_EVENT_TYPE_SHARING:
+#endif
+
+ default:
+ rc = -ENOSYS;
+ }
+
+ return rc;
+}
+
+int vm_event_ng_get_frames(struct domain *d, unsigned int id,
+ unsigned long frame, unsigned int nr_frames,
+ xen_pfn_t mfn_list[])
+{
+ struct vm_event_domain *ved;
+ int i;
+
+    switch ( id )
+ {
+ case XEN_VM_EVENT_TYPE_MONITOR:
+ ved = d->vm_event_monitor;
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ if ( !vm_event_check(ved) )
+ return -EINVAL;
+
+ if ( frame != 0 || nr_frames != to_channels(ved)->nr_frames )
+ return -EINVAL;
+
+ spin_lock(&ved->lock);
+
+ for ( i = 0; i < to_channels(ved)->nr_frames; i++ )
+ mfn_list[i] = mfn_x(to_channels(ved)->mfn[i]);
+
+ spin_unlock(&ved->lock);
+ return 0;
+}
+
+static const struct vm_event_ops vm_event_channels_ops = {
+ .check = vm_event_channels_check,
+ .cleanup = vm_event_channels_cleanup,
+ .claim_slot = vm_event_channels_claim_slot,
+ .cancel_slot = vm_event_channels_cancel_slot,
+ .put_request = vm_event_channels_put_request
+};
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -792,6 +792,24 @@ struct xen_domctl_vm_event_op {
};
/*
+ * XEN_DOMCTL_vm_event_ng_op.
+ * Next Generation vm_event operations.
+ */
+#define XEN_VM_EVENT_NG_CREATE 0
+#define XEN_VM_EVENT_NG_DESTROY 1
+#define XEN_VM_EVENT_NG_SET_STATE 2
+#define XEN_VM_EVENT_NG_GET_VERSION 3
+
+struct xen_domctl_vm_event_ng_op {
+ uint32_t op; /* XEN_VM_EVENT_NG_* */
+ uint32_t type; /* XEN_VM_EVENT_TYPE_* */
+ union {
+ uint32_t version; /* OUT: version number */
+ uint8_t enabled; /* IN: state */
+ } u;
+};
+
+/*
* Memory sharing operations
*/
/* XEN_DOMCTL_mem_sharing_op.
@@ -1142,6 +1160,7 @@ struct xen_domctl {
/* #define XEN_DOMCTL_set_gnttab_limits 80 - Moved into XEN_DOMCTL_createdomain */
#define XEN_DOMCTL_vuart_op 81
#define XEN_DOMCTL_get_cpu_policy 82
+#define XEN_DOMCTL_vm_event_ng_op 83
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -1183,6 +1202,7 @@ struct xen_domctl {
struct xen_domctl_subscribe subscribe;
struct xen_domctl_debug_op debug_op;
struct xen_domctl_vm_event_op vm_event_op;
+ struct xen_domctl_vm_event_ng_op vm_event_ng_op;
struct xen_domctl_mem_sharing_op mem_sharing_op;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
@@ -612,6 +612,7 @@ struct xen_mem_acquire_resource {
#define XENMEM_resource_ioreq_server 0
#define XENMEM_resource_grant_table 1
+#define XENMEM_resource_vm_event 2
/*
* IN - a type-specific resource identifier, which must be zero
@@ -619,6 +620,7 @@ struct xen_mem_acquire_resource {
*
* type == XENMEM_resource_ioreq_server -> id == ioreq server id
* type == XENMEM_resource_grant_table -> id defined below
+ * type == XENMEM_resource_vm_event -> id == vm_event type
*/
uint32_t id;
@@ -421,6 +421,22 @@ typedef struct vm_event_st {
DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
+/* VM Event slot state */
+#define STATE_VM_EVENT_SLOT_IDLE 0 /* the slot data is invalid */
+#define STATE_VM_EVENT_SLOT_SUBMIT 1 /* a request was submitted */
+#define STATE_VM_EVENT_SLOT_FINISH 2 /* a response was issued */
+
+struct vm_event_slot
+{
+ uint32_t port; /* evtchn for notifications to/from helper */
+ uint32_t state:4;
+ uint32_t pad:28;
+ union {
+ vm_event_request_t req;
+ vm_event_response_t rsp;
+ } u;
+};
+
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#endif /* _XEN_PUBLIC_VM_EVENT_H */
@@ -110,6 +110,13 @@ static inline void vm_event_put_request(struct vm_event_domain *ved,
int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
XEN_GUEST_HANDLE_PARAM(void) u_domctl);
+int vm_event_ng_domctl(struct domain *d, struct xen_domctl_vm_event_ng_op *vec,
+ XEN_GUEST_HANDLE_PARAM(void) u_domctl);
+
+int vm_event_ng_get_frames(struct domain *d, unsigned int id,
+ unsigned long frame, unsigned int nr_frames,
+ xen_pfn_t mfn_list[]);
+
void vm_event_vcpu_pause(struct vcpu *v);
void vm_event_vcpu_unpause(struct vcpu *v);
@@ -118,6 +125,9 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
void vm_event_monitor_next_interrupt(struct vcpu *v);
+void vm_event_handle_response(struct domain *d, struct vcpu *v,
+ vm_event_response_t *rsp);
+
#endif /* __VM_EVENT_H__ */
/*

In high throughput introspection scenarios where lots of monitor
vm_events are generated, the ring buffer can fill up before the monitor
application gets a chance to handle all the requests, thus blocking
other vCPUs, which have to wait for a ring slot to become available.

This patch adds support for a different mechanism for handling
synchronous vm_event requests / responses. Since each synchronous
request pauses the vCPU until the corresponding response is handled, it
can be stored in a slotted memory buffer (one slot per vCPU) shared
between the hypervisor and the controlling domain.

Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com>
---
 tools/libxc/include/xenctrl.h |   6 +
 tools/libxc/xc_monitor.c      |  15 ++
 tools/libxc/xc_private.h      |   8 +
 tools/libxc/xc_vm_event.c     |  53 +++++
 xen/arch/x86/mm.c             |   5 +
 xen/common/Makefile           |   1 +
 xen/common/domctl.c           |   7 +
 xen/common/vm_event.c         |  94 ++++-----
 xen/common/vm_event_ng.c      | 456 ++++++++++++++++++++++++++++++++++++
 xen/include/public/domctl.h   |  20 ++
 xen/include/public/memory.h   |   2 +
 xen/include/public/vm_event.h |  16 ++
 xen/include/xen/vm_event.h    |  10 +
 13 files changed, 649 insertions(+), 44 deletions(-)
 create mode 100644 xen/common/vm_event_ng.c
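
For illustration only (not part of the patch): below is a minimal sketch of
how a monitor application is expected to drive the new interface, assuming
the existing libxenforeignmemory / libxenevtchn APIs. The buffer sizing
mirrors vm_event_channels_create(); memory barriers, error handling,
unbinding and teardown are elided, and process_request() is a hypothetical
application-specific handler.

#include <sys/mman.h>
#include <xenctrl.h>
#include <xenevtchn.h>
#include <xenforeignmemory.h>
#include <xen/vm_event.h>

/* Hypothetical handler: the response must echo version and vcpu_id,
 * as vm_event_channels_resume() verifies both. */
extern vm_event_response_t process_request(const vm_event_request_t *req);

static void monitor_loop(xc_interface *xch, uint32_t domain_id,
                         unsigned int max_vcpus)
{
    xenforeignmemory_handle *fmem = xenforeignmemory_open(NULL, 0);
    xenevtchn_handle *xce = xenevtchn_open(NULL, 0);
    xenevtchn_port_or_error_t local_port[max_vcpus];
    struct vm_event_slot *slots;
    unsigned int nr_frames, i;

    /* Must match the sizing done by vm_event_channels_create(). */
    nr_frames = (max_vcpus * sizeof(*slots) + XC_PAGE_SIZE - 1) / XC_PAGE_SIZE;

    xc_monitor_ng_create(xch, domain_id);

    /* Map the slotted buffer via the new XENMEM_resource_vm_event type. */
    xenforeignmemory_map_resource(fmem, domain_id, XENMEM_resource_vm_event,
                                  XEN_VM_EVENT_TYPE_MONITOR, 0, nr_frames,
                                  (void **)&slots, PROT_READ | PROT_WRITE, 0);

    /* Each slot advertises its own notification channel. */
    for ( i = 0; i < max_vcpus; i++ )
        local_port[i] = xenevtchn_bind_interdomain(xce, domain_id,
                                                   slots[i].port);

    xc_monitor_ng_set_state(xch, domain_id, true);

    for ( ;; )
    {
        xenevtchn_port_or_error_t port = xenevtchn_pending(xce);

        for ( i = 0; i < max_vcpus; i++ )
            if ( local_port[i] == port )
                break;

        if ( i < max_vcpus && slots[i].state == STATE_VM_EVENT_SLOT_SUBMIT )
        {
            slots[i].u.rsp = process_request(&slots[i].u.req);
            slots[i].state = STATE_VM_EVENT_SLOT_FINISH;
            xenevtchn_notify(xce, port); /* wakes monitor_notification() */
        }

        xenevtchn_unmask(xce, port);
    }
}

A single slot per vCPU suffices because a synchronous request keeps the
vCPU paused until its response is consumed, so at most one request per
vCPU can be in flight at any time.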
In high throughput introspection scenarios where lots of monitor vm_events are generated, the ring buffer can fill up before the monitor application gets a chance to handle all the requests thus blocking other vcpus which will have to wait for a slot to become available. This patch adds support for a different mechanism to handle synchronous vm_event requests / responses. As each synchronous request pauses the vcpu until the corresponding response is handled, it can be stored in a slotted memory buffer (one per vcpu) shared between the hypervisor and the controlling domain. Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com> --- tools/libxc/include/xenctrl.h | 6 + tools/libxc/xc_monitor.c | 15 ++ tools/libxc/xc_private.h | 8 + tools/libxc/xc_vm_event.c | 53 +++++ xen/arch/x86/mm.c | 5 + xen/common/Makefile | 1 + xen/common/domctl.c | 7 + xen/common/vm_event.c | 94 ++++----- xen/common/vm_event_ng.c | 449 ++++++++++++++++++++++++++++++++++++++++++ xen/include/public/domctl.h | 20 ++ xen/include/public/memory.h | 2 + xen/include/public/vm_event.h | 16 ++ xen/include/xen/vm_event.h | 10 + 13 files changed, 642 insertions(+), 44 deletions(-) create mode 100644 xen/common/vm_event_ng.c