@@ -1401,7 +1401,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
     pg = iorp->va;
 
     if ( !pg )
-        return X86EMUL_UNHANDLEABLE;
+        return IOREQ_STATUS_UNHANDLED;
 
     /*
      * Return 0 for the cases we can't deal with:
@@ -1431,7 +1431,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
         break;
     default:
         gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
-        return X86EMUL_UNHANDLEABLE;
+        return IOREQ_STATUS_UNHANDLED;
     }
 
     spin_lock(&s->bufioreq_lock);
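Only the default arm of the size switch is visible in this hunk; the earlier arms map 1-, 2-, 4- and 8-byte accesses onto the 2-bit size encoding of buf_ioreq_t, with an 8-byte access additionally claiming a second ring slot (qw). A minimal standalone sketch of that mapping, using the hypothetical name encode_buf_ioreq_size (not a Xen function):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only: mirrors the size switch in hvm_send_buffered_ioreq(). */
    static bool encode_buf_ioreq_size(uint32_t bytes, uint8_t *size_enc, int *qw)
    {
        *qw = 0;
        switch ( bytes )
        {
        case 1: *size_enc = 0; break;
        case 2: *size_enc = 1; break;
        case 4: *size_enc = 2; break;
        case 8: *size_enc = 3; *qw = 1; break; /* data takes a second slot */
        default: return false; /* unknown size: caller reports UNHANDLED */
        }
        return true;
    }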
@@ -1441,7 +1441,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
     {
         /* The queue is full: send the ioreq through the normal path. */
         spin_unlock(&s->bufioreq_lock);
-        return X86EMUL_UNHANDLEABLE;
+        return IOREQ_STATUS_UNHANDLED;
     }
 
     pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
@@ -1472,7 +1472,7 @@ static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
     notify_via_xen_event_channel(d, s->bufioreq_evtchn);
     spin_unlock(&s->bufioreq_lock);
 
-    return X86EMUL_OKAY;
+    return IOREQ_STATUS_HANDLED;
 }
 
 int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
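Both UNHANDLED exits from hvm_send_buffered_ioreq() protect the buffered ring: when it cannot take the request, the caller retries through the synchronous path. The queue-full check itself is elided from this excerpt; it compares the monotonically increasing write and read pointers, whose unsigned difference gives the occupancy even across wraparound. A sketch of that test, assuming IOREQ_BUFFER_SLOT_NUM is 511 as in the public ABI header; bufioreq_queue_full is a hypothetical name:

    #include <stdbool.h>
    #include <stdint.h>

    #define IOREQ_BUFFER_SLOT_NUM 511 /* per xen/include/public/hvm/ioreq.h */

    /* Sketch only: qw is 1 for an 8-byte request, which needs two slots. */
    static bool bufioreq_queue_full(uint32_t write_pointer,
                                    uint32_t read_pointer, int qw)
    {
        return (write_pointer - read_pointer) >= (IOREQ_BUFFER_SLOT_NUM - qw);
    }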
@@ -1488,7 +1488,7 @@ int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
         return hvm_send_buffered_ioreq(s, proto_p);
 
     if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
-        return X86EMUL_RETRY;
+        return IOREQ_STATUS_RETRY;
 
     list_for_each_entry ( sv,
                           &s->ioreq_vcpu_list,
@@ -1528,11 +1528,11 @@ int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
             notify_via_xen_event_channel(d, port);
 
             sv->pending = true;
-            return X86EMUL_RETRY;
+            return IOREQ_STATUS_RETRY;
         }
     }
 
-    return X86EMUL_UNHANDLEABLE;
+    return IOREQ_STATUS_UNHANDLED;
 }
 
 unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
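IOREQ_STATUS_RETRY is returned once the request sits in the vCPU's shared ioreq slot and the event channel has been kicked; the vCPU is then held until the emulator completes the handshake. The slot-state values below are from the public ABI (xen/include/public/hvm/ioreq.h), shown for reference with paraphrased comments:

    #define STATE_IOREQ_NONE        0 /* slot free */
    #define STATE_IORESP_READY      1 /* emulator has written its response */
    #define STATE_IOREQ_READY       2 /* request posted; RETRY returned here */
    #define STATE_IOREQ_INPROCESS   3 /* emulator has claimed the request */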
@@ -1546,7 +1546,7 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
         if ( !s->enabled )
            continue;
 
-        if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
+        if ( hvm_send_ioreq(s, p, buffered) == IOREQ_STATUS_UNHANDLED )
             failed++;
     }
 
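Unlike the single-server path, hvm_broadcast_ioreq() reports how many servers rejected the request rather than a status code, so callers test for non-zero. A hypothetical caller sketch (the message text is illustrative, not from Xen):

    if ( hvm_broadcast_ioreq(&p, true) != 0 )
        gprintk(XENLOG_WARNING,
                "ioreq broadcast not accepted by every server\n");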
--- a/xen/include/asm-x86/hvm/ioreq.h
+++ b/xen/include/asm-x86/hvm/ioreq.h
@@ -55,6 +55,11 @@ unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered);
 
 void hvm_ioreq_init(struct domain *d);
 
+/* This correlation must not be altered */
+#define IOREQ_STATUS_HANDLED     X86EMUL_OKAY
+#define IOREQ_STATUS_UNHANDLED   X86EMUL_UNHANDLEABLE
+#define IOREQ_STATUS_RETRY       X86EMUL_RETRY
+
 #endif /* __ASM_X86_HVM_IOREQ_H__ */
 
 /*
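These aliases only work because x86 callers keep interpreting the result in X86EMUL_* terms while the ioreq code returns IOREQ_STATUS_* values; renumbering either side would silently break the other. A consumer sketch, loosely modelled on the existing x86 emulation path (abridged; not part of this patch):

    /* rc comes back from hvm_send_ioreq() but is tested as an X86EMUL_*
     * value; valid only while the definitions above stay as they are. */
    rc = hvm_send_ioreq(s, &p, 0);
    if ( rc != X86EMUL_RETRY )
        vio->io_req.state = STATE_IOREQ_NONE;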