@@ -26,6 +26,7 @@
#include "exec/memattrs.h"
#include "qapi/qapi-types-run-state.h"
#include "qemu/bitmap.h"
+#include "qemu/main-loop.h"
#include "qemu/rcu_queue.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
@@ -82,6 +83,8 @@ struct TranslationBlock;
* @reset_dump_flags: #CPUDumpFlags to use for reset logging.
* @has_work: Callback for checking if there is work to do. Called with the
* CPU lock held.
+ * @has_work_with_iothread_lock: Callback for checking if there is work to do.
+ * Called with both the BQL and the CPU lock held; the BQL is acquired first.
* @do_interrupt: Callback for interrupt handling.
* @do_unaligned_access: Callback for unaligned access handling, if
* the target defines #TARGET_ALIGNED_ONLY.
@@ -167,6 +170,7 @@ typedef struct CPUClass {
int reset_dump_flags;
bool (*has_work)(CPUState *cpu);
+ bool (*has_work_with_iothread_lock)(CPUState *cpu);
void (*do_interrupt)(CPUState *cpu);
void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
MMUAccessType access_type,
@@ -800,14 +804,41 @@ const char *parse_cpu_option(const char *cpu_option);
static inline bool cpu_has_work(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
+ bool has_cpu_lock = cpu_mutex_locked(cpu);
+ bool (*func)(CPUState *cpu);
bool ret;
+ /* some targets require us to hold the BQL when checking for work */
+ if (cc->has_work_with_iothread_lock) {
+ if (qemu_mutex_iothread_locked()) {
+ func = cc->has_work_with_iothread_lock;
+ goto call_func;
+ }
+
+ if (has_cpu_lock) {
+ /* drop the CPU lock so the BQL can be taken first (lock order: BQL, then CPU lock) */
+ cpu_mutex_unlock(cpu);
+ }
+ qemu_mutex_lock_iothread();
+ cpu_mutex_lock(cpu);
+
+ ret = cc->has_work_with_iothread_lock(cpu);
+
+ qemu_mutex_unlock_iothread();
+ if (!has_cpu_lock) {
+ cpu_mutex_unlock(cpu);
+ }
+ return ret;
+ }
+
g_assert(cc->has_work);
- if (cpu_mutex_locked(cpu)) {
- return cc->has_work(cpu);
+ func = cc->has_work;
+ call_func:
+ if (has_cpu_lock) {
+ return func(cpu);
}
cpu_mutex_lock(cpu);
- ret = cc->has_work(cpu);
+ ret = func(cpu);
cpu_mutex_unlock(cpu);
return ret;
}