@@ -97,7 +97,7 @@ void cpu_list_remove(CPUState *cpu)
 }
 
 struct qemu_work_item {
-    struct qemu_work_item *next;
+    QSIMPLEQ_ENTRY(qemu_work_item) node;
     run_on_cpu_func func;
     run_on_cpu_data data;
     bool free, exclusive, done;
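
For context on the new field: QSIMPLEQ_ENTRY() embeds the singly linked link that the hand-rolled *next pointer used to provide, and the matching QSIMPLEQ_HEAD() keeps both a first pointer and a tail pointer so appending stays cheap. A minimal sketch of that pairing, assuming it is built inside the QEMU tree so "qemu/osdep.h" and "qemu/queue.h" resolve; work_item, pending and push_item are invented names, not part of the patch:

    #include "qemu/osdep.h"
    #include "qemu/queue.h"

    /* Element type: the embedded QSIMPLEQ_ENTRY replaces a hand-rolled *next. */
    struct work_item {
        QSIMPLEQ_ENTRY(work_item) node;
        int payload;
    };

    /* Head with an anonymous struct type, as used for CPUState below. */
    static QSIMPLEQ_HEAD(, work_item) pending =
        QSIMPLEQ_HEAD_INITIALIZER(pending);

    static void push_item(struct work_item *wi)
    {
        /* Same argument shape as in the patch: head, element, link field name. */
        QSIMPLEQ_INSERT_TAIL(&pending, wi, node);
    }
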
@@ -106,13 +106,7 @@ struct qemu_work_item {
 static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
 {
     qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_work_first == NULL) {
-        cpu->queued_work_first = wi;
-    } else {
-        cpu->queued_work_last->next = wi;
-    }
-    cpu->queued_work_last = wi;
-    wi->next = NULL;
+    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
     wi->done = false;
     qemu_mutex_unlock(&cpu->work_mutex);
 
@@ -306,17 +300,14 @@ void process_queued_cpu_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
 
-    if (cpu->queued_work_first == NULL) {
+    qemu_mutex_lock(&cpu->work_mutex);
+    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
+        qemu_mutex_unlock(&cpu->work_mutex);
         return;
     }
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    while (cpu->queued_work_first != NULL) {
-        wi = cpu->queued_work_first;
-        cpu->queued_work_first = wi->next;
-        if (!cpu->queued_work_first) {
-            cpu->queued_work_last = NULL;
-        }
+    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
+        wi = QSIMPLEQ_FIRST(&cpu->work_list);
+        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
         qemu_mutex_unlock(&cpu->work_mutex);
         if (wi->exclusive) {
             /* Running work items outside the BQL avoids the following deadlock:
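
A note on the locking shape in this loop: one item is popped while work_mutex is held, the lock is dropped so the callback can run (and take the BQL or queue further work) without deadlocking, and the lock is retaken before the queue is examined again. A condensed sketch of that pop, unlock, run, relock pattern; the names drain_work, pending, work_mutex and the func/opaque fields are hypothetical, and the real function additionally handles wi->free, wi->done and the exclusive path:

    #include "qemu/osdep.h"
    #include "qemu/queue.h"
    #include "qemu/thread.h"

    struct work_item {
        QSIMPLEQ_ENTRY(work_item) node;
        void (*func)(void *opaque);
        void *opaque;
    };

    static QemuMutex work_mutex;   /* protects 'pending'; qemu_mutex_init() elsewhere */
    static QSIMPLEQ_HEAD(, work_item) pending =
        QSIMPLEQ_HEAD_INITIALIZER(pending);

    static void drain_work(void)
    {
        struct work_item *wi;

        qemu_mutex_lock(&work_mutex);
        while (!QSIMPLEQ_EMPTY(&pending)) {
            wi = QSIMPLEQ_FIRST(&pending);
            QSIMPLEQ_REMOVE_HEAD(&pending, node);

            /* Run the callback without the list lock held. */
            qemu_mutex_unlock(&work_mutex);
            wi->func(wi->opaque);
            qemu_mutex_lock(&work_mutex);
        }
        qemu_mutex_unlock(&work_mutex);
    }
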
@@ -97,9 +97,19 @@ bool cpu_is_stopped(CPUState *cpu)
     return cpu->stopped || !runstate_is_running();
 }
 
+static inline bool cpu_work_list_empty(CPUState *cpu)
+{
+    bool ret;
+
+    qemu_mutex_lock(&cpu->work_mutex);
+    ret = QSIMPLEQ_EMPTY(&cpu->work_list);
+    qemu_mutex_unlock(&cpu->work_mutex);
+    return ret;
+}
+
 static bool cpu_thread_is_idle(CPUState *cpu)
 {
-    if (cpu->stop || cpu->queued_work_first) {
+    if (cpu->stop || !cpu_work_list_empty(cpu)) {
         return false;
     }
     if (cpu_is_stopped(cpu)) {
@@ -1498,7 +1508,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
             cpu = first_cpu;
         }
 
-        while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
+        while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
 
             atomic_mb_set(&tcg_current_rr_cpu, cpu);
             current_cpu = cpu;
@@ -370,6 +370,7 @@ static void cpu_common_initfn(Object *obj)
     cpu->nr_threads = 1;
 
     qemu_mutex_init(&cpu->work_mutex);
+    QSIMPLEQ_INIT(&cpu->work_list);
     QTAILQ_INIT(&cpu->breakpoints);
     QTAILQ_INIT(&cpu->watchpoints);
@@ -331,8 +331,8 @@ struct qemu_work_item;
  * @opaque: User data.
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
- * @work_mutex: Lock to prevent multiple access to queued_work_*.
- * @queued_work_first: First asynchronous work pending.
+ * @work_mutex: Lock to prevent multiple access to @work_list.
+ * @work_list: List of pending asynchronous work.
  * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
  *                        to @trace_dstate).
  * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
@@ -376,7 +376,7 @@ struct CPUState {
     sigjmp_buf jmp_env;
 
     QemuMutex work_mutex;
-    struct qemu_work_item *queued_work_first, *queued_work_last;
+    QSIMPLEQ_HEAD(, qemu_work_item) work_list;
 
     CPUAddressSpace *cpu_ases;
     int num_ases;
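
Rounding out the declaration side (illustrative only): the anonymous QSIMPLEQ_HEAD(, ...) above declares the head inline in the owning structure, QSIMPLEQ_INIT() sets it up at construction time, and any cross-thread emptiness check has to take the same mutex, which is what cpu_work_list_empty() does. A sketch under the same assumptions as before; Worker, worker_init and worker_idle are invented names:

    #include "qemu/osdep.h"
    #include "qemu/queue.h"
    #include "qemu/thread.h"

    struct work_item;   /* element with a QSIMPLEQ_ENTRY(work_item) node field */

    /* Hypothetical owner, mirroring the work_mutex/work_list pair in CPUState. */
    typedef struct Worker {
        QemuMutex work_mutex;
        QSIMPLEQ_HEAD(, work_item) work_list;   /* anonymous head type */
    } Worker;

    static void worker_init(Worker *w)
    {
        qemu_mutex_init(&w->work_mutex);
        QSIMPLEQ_INIT(&w->work_list);   /* empty list: sqh_first == NULL */
    }

    /* Check emptiness under the mutex, since other threads may enqueue. */
    static bool worker_idle(Worker *w)
    {
        bool empty;

        qemu_mutex_lock(&w->work_mutex);
        empty = QSIMPLEQ_EMPTY(&w->work_list);
        qemu_mutex_unlock(&w->work_mutex);
        return empty;
    }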