@@ -290,7 +290,7 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
}
/* Otherwise, check if there is a vm_event monitor subscriber */
- if ( !vm_event_check_ring(v->domain->vm_event_monitor) )
+ if ( !vm_event_check(v->domain->vm_event_monitor) )
{
/* No listener */
if ( p2m->access_required )
@@ -182,7 +182,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
gfn_unlock(p2m, gfn, 0);
/* Otherwise, check if there is a memory event listener, and send the message along */
- if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr )
+ if ( !vm_event_check(d->vm_event_monitor) || !req_ptr )
{
/* No listener */
if ( p2m->access_required )
@@ -210,7 +210,7 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
return true;
}
}
- if ( vm_event_check_ring(d->vm_event_monitor) &&
+ if ( vm_event_check(d->vm_event_monitor) &&
d->arch.monitor.inguest_pagefault_disabled &&
npfec.kind != npfec_kind_with_gla ) /* don't send a mem_event */
{
@@ -44,7 +44,7 @@ int mem_paging_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_paging_op_t) arg)
goto out;
rc = -ENODEV;
- if ( unlikely(!vm_event_check_ring(d->vm_event_paging)) )
+ if ( unlikely(!vm_event_check(d->vm_event_paging)) )
goto out;
switch( mpo.op )
@@ -52,7 +52,7 @@ int mem_access_memop(unsigned long cmd,
goto out;
rc = -ENODEV;
- if ( unlikely(!vm_event_check_ring(d->vm_event_monitor)) )
+ if ( unlikely(!vm_event_check(d->vm_event_monitor)) )
goto out;
switch ( mao.op )
@@ -196,7 +196,7 @@ void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
static int vm_event_disable(struct domain *d, struct vm_event_domain **ved)
{
- if ( vm_event_check_ring(*ved) )
+ if ( vm_event_check(*ved) )
{
struct vcpu *v;
@@ -277,7 +277,7 @@ void vm_event_put_request(struct domain *d,
RING_IDX req_prod;
struct vcpu *curr = current;
- if( !vm_event_check_ring(ved))
+ if( !vm_event_check(ved))
return;
if ( curr->domain != d )
@@ -380,7 +380,7 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
*/
ASSERT(d != current->domain);
- if ( unlikely(!vm_event_check_ring(ved)) )
+ if ( unlikely(!vm_event_check(ved)) )
return -ENODEV;
/* Pull all responses off the ring. */
@@ -452,7 +452,7 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
{
- if( !vm_event_check_ring(ved) )
+ if( !vm_event_check(ved) )
return;
vm_event_ring_lock(ved);
@@ -501,7 +501,7 @@ static int vm_event_wait_slot(struct vm_event_domain *ved)
return rc;
}
-bool vm_event_check_ring(struct vm_event_domain *ved)
+bool vm_event_check(struct vm_event_domain *ved)
{
return (ved && ved->ring_page);
}
@@ -521,7 +521,7 @@ bool vm_event_check_ring(struct vm_event_domain *ved)
int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
bool allow_sleep)
{
- if ( !vm_event_check_ring(ved) )
+ if ( !vm_event_check(ved) )
return -EOPNOTSUPP;
if ( (current->domain == d) && allow_sleep )
@@ -556,7 +556,7 @@ static void mem_sharing_notification(struct vcpu *v, unsigned int port)
void vm_event_cleanup(struct domain *d)
{
#ifdef CONFIG_HAS_MEM_PAGING
- if ( vm_event_check_ring(d->vm_event_paging) )
+ if ( vm_event_check(d->vm_event_paging) )
{
/* Destroying the wait queue head means waking up all
* queued vcpus. This will drain the list, allowing
@@ -569,13 +569,13 @@ void vm_event_cleanup(struct domain *d)
(void)vm_event_disable(d, &d->vm_event_paging);
}
#endif
- if ( vm_event_check_ring(d->vm_event_monitor) )
+ if ( vm_event_check(d->vm_event_monitor) )
{
destroy_waitqueue_head(&d->vm_event_monitor->wq);
(void)vm_event_disable(d, &d->vm_event_monitor);
}
#ifdef CONFIG_HAS_MEM_SHARING
- if ( vm_event_check_ring(d->vm_event_share) )
+ if ( vm_event_check(d->vm_event_share) )
{
destroy_waitqueue_head(&d->vm_event_share->wq);
(void)vm_event_disable(d, &d->vm_event_share);
@@ -663,7 +663,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
break;
case XEN_VM_EVENT_DISABLE:
- if ( vm_event_check_ring(d->vm_event_paging) )
+ if ( vm_event_check(d->vm_event_paging) )
{
domain_pause(d);
rc = vm_event_disable(d, &d->vm_event_paging);
@@ -700,7 +700,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
break;
case XEN_VM_EVENT_DISABLE:
- if ( vm_event_check_ring(d->vm_event_monitor) )
+ if ( vm_event_check(d->vm_event_monitor) )
{
domain_pause(d);
rc = vm_event_disable(d, &d->vm_event_monitor);
@@ -745,7 +745,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
break;
case XEN_VM_EVENT_DISABLE:
- if ( vm_event_check_ring(d->vm_event_share) )
+ if ( vm_event_check(d->vm_event_share) )
{
domain_pause(d);
rc = vm_event_disable(d, &d->vm_event_share);
@@ -1453,7 +1453,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
/* Prevent device assign if mem paging or mem sharing have been
* enabled for this domain */
if ( unlikely(d->arch.hvm.mem_sharing_enabled ||
- vm_event_check_ring(d->vm_event_paging) ||
+ vm_event_check(d->vm_event_paging) ||
p2m_get_hostp2m(d)->global_logdirty) )
return -EXDEV;
@@ -29,8 +29,8 @@
/* Clean up on domain destruction */
void vm_event_cleanup(struct domain *d);
-/* Returns whether a ring has been set up */
-bool vm_event_check_ring(struct vm_event_domain *ved);
+/* Returns whether the VM event domain has been set up */
+bool vm_event_check(struct vm_event_domain *ved);
/* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
* available space and the caller is a foreign domain. If the guest itself
Decouple implementation from interface to allow vm_event_check to be used
regardless of the vm_event underlying implementation.

Signed-off-by: Petre Pircalabu <ppircalabu@bitdefender.com>
---
 xen/arch/arm/mem_access.c     |  2 +-
 xen/arch/x86/mm/mem_access.c  |  4 ++--
 xen/arch/x86/mm/mem_paging.c  |  2 +-
 xen/common/mem_access.c       |  2 +-
 xen/common/vm_event.c         | 24 ++++++++++++------------
 xen/drivers/passthrough/pci.c |  2 +-
 xen/include/xen/vm_event.h    |  4 ++--
 7 files changed, 20 insertions(+), 20 deletions(-)