@@ -2275,14 +2275,17 @@ static bool check_for_vcpu_work(void)
     struct vcpu *v = current;
 
 #ifdef CONFIG_IOREQ_SERVER
-    bool handled;
+    if ( domain_has_ioreq_server(v->domain) )
+    {
+        bool handled;
 
-    local_irq_enable();
-    handled = vcpu_ioreq_handle_completion(v);
-    local_irq_disable();
+        local_irq_enable();
+        handled = vcpu_ioreq_handle_completion(v);
+        local_irq_disable();
 
-    if ( !handled )
-        return true;
+        if ( !handled )
+            return true;
+    }
 #endif
 
     if ( likely(!v->arch.need_flush_to_ram) )
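
For reference, this is how the IOREQ block of check_for_vcpu_work() reads once the hunk above is applied. It is a sketch only: code outside the hunk is assumed unchanged, and the comments are explanatory additions rather than part of the patch.

static bool check_for_vcpu_work(void)
{
    struct vcpu *v = current;

#ifdef CONFIG_IOREQ_SERVER
    /* Domains with no IOREQ server registered skip the completion path. */
    if ( domain_has_ioreq_server(v->domain) )
    {
        bool handled;

        /* Run completion handling with interrupts enabled, then mask again. */
        local_irq_enable();
        handled = vcpu_ioreq_handle_completion(v);
        local_irq_disable();

        /* Not yet handled: tell the caller there is still work to do. */
        if ( !handled )
            return true;
    }
#endif

    if ( likely(!v->arch.need_flush_to_ram) )
        return false;

    /* ... remainder of the function is untouched by this patch ... */
}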
@@ -80,6 +80,22 @@ static ioreq_t *get_ioreq(struct ioreq_server *s, struct vcpu *v)
     return &p->vcpu_ioreq[v->vcpu_id];
 }
 
+/*
+ * This should only be used when d == current->domain or when they're
+ * distinct and d is paused. Otherwise the result is stale before
+ * the caller can inspect it.
+ */
+bool domain_has_ioreq_server(const struct domain *d)
+{
+    const struct ioreq_server *s;
+    unsigned int id;
+
+    FOR_EACH_IOREQ_SERVER(d, id, s)
+        return true;
+
+    return false;
+}
+
 static struct ioreq_vcpu *get_pending_vcpu(const struct vcpu *v,
                                            struct ioreq_server **srvp)
 {
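
The loop body above executes once per registered server, so returning true on the first iteration makes this a cheap "is any IOREQ server registered?" probe. The comment's constraint can be honoured in two ways, sketched below; the example_* helpers are hypothetical (not part of this patch), and the foreign-domain case leans on the existing domain_pause()/domain_unpause() pair so the result cannot go stale before it is acted upon.

/* Case 1: d == current->domain, as in the Arm exit-path hunk above. */
static bool example_self_has_ioreq_server(void)        /* hypothetical */
{
    return domain_has_ioreq_server(current->domain);
}

/* Case 2: d is a foreign domain, so keep it paused around the check. */
static void example_foreign_check(struct domain *d)    /* hypothetical */
{
    domain_pause(d);

    if ( domain_has_ioreq_server(d) )
    {
        /* Act on the result while d is still paused. */
    }

    domain_unpause(d);
}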
@@ -83,6 +83,8 @@ static inline bool ioreq_needs_completion(const ioreq_t *ioreq)
 #define HANDLE_BUFIOREQ(s) \
     ((s)->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF)
 
+bool domain_has_ioreq_server(const struct domain *d);
+
 bool vcpu_ioreq_pending(struct vcpu *v);
 bool vcpu_ioreq_handle_completion(struct vcpu *v);
 bool is_ioreq_server_page(struct domain *d, const struct page_info *page);