@@ -161,7 +161,7 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
while (!atomic_mb_read(&wi.done)) {
CPUState *self_cpu = current_cpu;
- qemu_cond_wait(&cpu->cond, &cpu->lock);
+ qemu_cond_wait(&cpu->cond, cpu->lock);
current_cpu = self_cpu;
}
cpu_mutex_unlock(cpu);
@@ -92,6 +92,12 @@ static unsigned int throttle_percentage;
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000
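+/*
+ * True when all vCPUs run round-robin in a single TCG thread (this includes
+ * icount mode); in that case the vCPUs can all share the BQL as their CPU lock.
+ */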
+static inline bool qemu_is_tcg_rr(void)
+{
+ /* in `make check-qtest', "use_icount && !tcg_enabled()" might be true */
+ return use_icount || (tcg_enabled() && !qemu_tcg_mttcg_enabled());
+}
+
/*
* Note: we index the bitmap with cpu->cpu_index + 1 so that the logic
* also works during early CPU initialization, when cpu->cpu_index is set to
@@ -104,25 +110,75 @@ bool no_cpu_mutex_locked(void)
return bitmap_empty(cpu_lock_bitmap, CPU_LOCK_BITMAP_SIZE);
}
-void cpu_mutex_lock_impl(CPUState *cpu, const char *file, int line)
+static __thread bool iothread_locked;
+/*
+ * In TCG rr mode, we make the BQL a recursive mutex, so that we can use it for
+ * all vCPUs while keeping the interface as if the locks were per-CPU.
+ *
+ * The fact that the BQL is implemented recursively is invisible to BQL users;
+ * the mutex API we export (qemu_mutex_lock_iothread() etc.) is non-recursive.
+ *
+ * Locking order: the BQL is always acquired before CPU locks.
+ */
+static __thread int iothread_lock_count;
+
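+/*
+ * The first rr_cpu_mutex_lock() in a thread takes the BQL; nested calls only
+ * bump iothread_lock_count. rr_cpu_mutex_unlock() releases the BQL once the
+ * count drops back to zero.
+ */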
+static void rr_cpu_mutex_lock(void)
{
-/* coverity gets confused by the indirect function call */
+ if (iothread_lock_count++ == 0) {
+ /*
+ * Circumvent qemu_mutex_lock_iothread()'s state keeping by
+ * acquiring the BQL directly.
+ */
+ qemu_mutex_lock(&qemu_global_mutex);
+ }
+}
+
+static void rr_cpu_mutex_unlock(void)
+{
+ g_assert(iothread_lock_count > 0);
+ if (--iothread_lock_count == 0) {
+ /*
+ * Circumvent qemu_mutex_unlock_iothread()'s state keeping by
+ * releasing the BQL directly.
+ */
+ qemu_mutex_unlock(&qemu_global_mutex);
+ }
+}
+
+static void do_cpu_mutex_lock(CPUState *cpu, const char *file, int line)
+{
+ /* coverity gets confused by the indirect function call */
#ifdef __COVERITY__
- qemu_mutex_lock_impl(&cpu->lock, file, line);
+ qemu_mutex_lock_impl(cpu->lock, file, line);
#else
QemuMutexLockFunc f = atomic_read(&qemu_mutex_lock_func);
+ f(cpu->lock, file, line);
+#endif
+}
+
+void cpu_mutex_lock_impl(CPUState *cpu, const char *file, int line)
+{
g_assert(!cpu_mutex_locked(cpu));
set_bit(cpu->cpu_index + 1, cpu_lock_bitmap);
- f(&cpu->lock, file, line);
-#endif
+
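+ /* in TCG RR all CPU locks map to the BQL, taken via the recursive helpers */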
+ if (qemu_is_tcg_rr()) {
+ rr_cpu_mutex_lock();
+ } else {
+ do_cpu_mutex_lock(cpu, file, line);
+ }
}
void cpu_mutex_unlock_impl(CPUState *cpu, const char *file, int line)
{
g_assert(cpu_mutex_locked(cpu));
- qemu_mutex_unlock_impl(&cpu->lock, file, line);
clear_bit(cpu->cpu_index + 1, cpu_lock_bitmap);
+
+ if (qemu_is_tcg_rr()) {
+ rr_cpu_mutex_unlock();
+ return;
+ }
+ qemu_mutex_unlock_impl(cpu->lock, file, line);
}
bool cpu_mutex_locked(const CPUState *cpu)
@@ -130,6 +186,20 @@ bool cpu_mutex_locked(const CPUState *cpu)
return test_bit(cpu->cpu_index + 1, cpu_lock_bitmap);
}
+void cpu_mutex_destroy(CPUState *cpu)
+{
+ /*
+ * In TCG RR, cpu->lock is the BQL under the hood. In all other modes,
+ * cpu->lock is a standalone per-CPU lock.
+ */
+ if (qemu_is_tcg_rr()) {
+ cpu->lock = NULL;
+ } else {
+ qemu_mutex_destroy(cpu->lock);
+ g_free(cpu->lock);
+ }
+}
+
bool cpu_is_stopped(CPUState *cpu)
{
return cpu->stopped || !runstate_is_running();
@@ -1883,8 +1953,6 @@ bool qemu_in_vcpu_thread(void)
return current_cpu && qemu_cpu_is_self(current_cpu);
}
-static __thread bool iothread_locked = false;
-
bool qemu_mutex_iothread_locked(void)
{
return iothread_locked;
@@ -1903,6 +1971,8 @@ void qemu_mutex_lock_iothread_impl(const char *file, int line)
g_assert(!qemu_mutex_iothread_locked());
bql_lock(&qemu_global_mutex, file, line);
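+ /*
+ * Per the locking order (BQL before CPU locks) the recursion count must be
+ * zero here; record the acquisition so that nested cpu_mutex_lock() calls
+ * in TCG RR mode do not retake the BQL.
+ */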
+ g_assert(iothread_lock_count == 0);
+ iothread_lock_count++;
iothread_locked = true;
}
@@ -1910,7 +1980,10 @@ void qemu_mutex_unlock_iothread(void)
{
g_assert(qemu_mutex_iothread_locked());
iothread_locked = false;
- qemu_mutex_unlock(&qemu_global_mutex);
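+ /* only drop the BQL once the last recursive reference is gone */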
+ g_assert(iothread_lock_count > 0);
+ if (--iothread_lock_count == 0) {
+ qemu_mutex_unlock(&qemu_global_mutex);
+ }
}
void qemu_cond_wait_iothread(QemuCond *cond)
@@ -2146,6 +2219,16 @@ void qemu_init_vcpu(CPUState *cpu)
cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
}
+ /*
+ * In TCG RR, cpu->lock is the BQL under the hood. In all other modes,
+ * cpu->lock is a standalone per-CPU lock.
+ */
+ if (qemu_is_tcg_rr()) {
+ qemu_mutex_destroy(cpu->lock);
+ g_free(cpu->lock);
+ cpu->lock = &qemu_global_mutex;
+ }
+
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
} else if (hax_enabled()) {
@@ -369,7 +369,8 @@ static void cpu_common_initfn(Object *obj)
cpu->nr_cores = 1;
cpu->nr_threads = 1;
- qemu_mutex_init(&cpu->lock);
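+ /*
+ * Allocate a standalone mutex; in TCG RR mode, qemu_init_vcpu() later frees
+ * it and points cpu->lock at the BQL instead.
+ */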
+ cpu->lock = g_new(QemuMutex, 1);
+ qemu_mutex_init(cpu->lock);
qemu_cond_init(&cpu->cond);
QSIMPLEQ_INIT(&cpu->work_list);
QTAILQ_INIT(&cpu->breakpoints);
@@ -382,7 +383,7 @@ static void cpu_common_finalize(Object *obj)
{
CPUState *cpu = CPU(obj);
- qemu_mutex_destroy(&cpu->lock);
+ cpu_mutex_destroy(cpu);
}
static int64_t cpu_common_get_arch_id(CPUState *cpu)
@@ -377,7 +377,7 @@ struct CPUState {
uint64_t random_seed;
sigjmp_buf jmp_env;
- QemuMutex lock;
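+ /* in TCG RR, qemu_init_vcpu() points this to the BQL; else a per-CPU mutex */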
+ QemuMutex *lock;
/* fields below protected by @lock */
QemuCond cond;
QSIMPLEQ_HEAD(, qemu_work_item) work_list;
@@ -485,6 +485,12 @@ void cpu_mutex_unlock_impl(CPUState *cpu, const char *file, int line);
*/
bool cpu_mutex_locked(const CPUState *cpu);
+/**
+ * cpu_mutex_destroy - destroy @cpu's mutex
+ * @cpu: the CPU whose mutex to destroy
+ *
+ * In TCG RR mode cpu->lock is the BQL, which is only detached, not destroyed;
+ * in all other modes the per-CPU mutex is destroyed and freed.
+ */
+void cpu_mutex_destroy(CPUState *cpu);
+
/**
* no_cpu_mutex_locked - check whether any CPU mutex is held
*
@@ -18,3 +18,10 @@ bool no_cpu_mutex_locked(void)
{
return true;
}
+
+void cpu_mutex_destroy(CPUState *cpu)
+{
+ qemu_mutex_destroy(cpu->lock);
+ g_free(cpu->lock);
+ cpu->lock = NULL;
+}