@@ -431,7 +431,7 @@ static inline bool cpu_handle_halt_locked(CPUState *cpu)
     if (cpu_halted(cpu)) {
 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
-        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
+        if ((cpu_interrupt_request(cpu) & CPU_INTERRUPT_POLL)
             && replay_interrupt()) {
             X86CPU *x86_cpu = X86_CPU(cpu);
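The cpu_interrupt_request() helper used here reads cpu->interrupt_request while holding the per-CPU lock, unless the caller already holds it. A minimal sketch of such a read accessor, assuming the series' cpu_mutex_lock()/cpu_mutex_unlock()/cpu_mutex_locked() helpers and QEMU's CPUState; the actual implementation in the series may differ:

/*
 * Sketch only: lock-aware read of cpu->interrupt_request. Takes the
 * per-CPU mutex unless the caller already holds it.
 */
static inline uint32_t cpu_interrupt_request(CPUState *cpu)
{
    uint32_t ret;

    if (cpu_mutex_locked(cpu)) {
        return cpu->interrupt_request;
    }
    cpu_mutex_lock(cpu);
    ret = cpu->interrupt_request;
    cpu_mutex_unlock(cpu);
    return ret;
}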
@@ -544,16 +544,17 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      */
     atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
-    if (unlikely(atomic_read(&cpu->interrupt_request))) {
+    if (unlikely(cpu_interrupt_request(cpu))) {
         int interrupt_request;
+
         qemu_mutex_lock_iothread();
-        interrupt_request = cpu->interrupt_request;
+        interrupt_request = cpu_interrupt_request(cpu);
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
             interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
         }
         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_DEBUG);
             cpu->exception_index = EXCP_DEBUG;
             qemu_mutex_unlock_iothread();
             return true;
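cpu_reset_interrupt() likewise replaces the open-coded cpu->interrupt_request &= ~mask. A sketch of how such a helper can clear bits while honouring the per-CPU lock, again an approximation rather than the exact patch; cpu_halted()/cpu_halted_set() in the surrounding context apply the same pattern to cpu->halted:

/*
 * Sketch only: clear bits in cpu->interrupt_request, taking the per-CPU
 * mutex only if the caller does not already hold it.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !cpu_mutex_locked(cpu);

    if (need_lock) {
        cpu_mutex_lock(cpu);
    }
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        cpu_mutex_unlock(cpu);
    }
}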
@@ -562,7 +563,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             /* Do nothing */
         } else if (interrupt_request & CPU_INTERRUPT_HALT) {
             replay_interrupt();
-            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
             cpu_halted_set(cpu, 1);
             cpu->exception_index = EXCP_HLT;
             qemu_mutex_unlock_iothread();
@@ -599,10 +600,10 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             }
             /* The target hook may have updated the 'cpu->interrupt_request';
              * reload the 'interrupt_request' value */
-            interrupt_request = cpu->interrupt_request;
+            interrupt_request = cpu_interrupt_request(cpu);
         }
         if (interrupt_request & CPU_INTERRUPT_EXITTB) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_EXITTB);
             /* ensure that no TB jump will be modified as
                the program flow was changed */
             *last_tb = NULL;
@@ -52,10 +52,16 @@ typedef struct TCGState {
 static void tcg_handle_interrupt(CPUState *cpu, int mask)
 {
     int old_mask;
-    g_assert(qemu_mutex_iothread_locked());
-    old_mask = cpu->interrupt_request;
-    cpu->interrupt_request |= mask;
+    if (!cpu_mutex_locked(cpu)) {
+        cpu_mutex_lock(cpu);
+        old_mask = cpu_interrupt_request(cpu);
+        cpu_interrupt_request_or(cpu, mask);
+        cpu_mutex_unlock(cpu);
+    } else {
+        old_mask = cpu_interrupt_request(cpu);
+        cpu_interrupt_request_or(cpu, mask);
+    }
     /*
      * If called from iothread context, wake the target cpu in
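cpu_interrupt_request_or() is the matching write-side helper; a sketch under the same assumptions:

/*
 * Sketch only: OR bits into cpu->interrupt_request under the per-CPU
 * mutex, skipping the lock when the caller already holds it.
 */
void cpu_interrupt_request_or(CPUState *cpu, uint32_t mask)
{
    if (cpu_mutex_locked(cpu)) {
        cpu->interrupt_request |= mask;
        return;
    }
    cpu_mutex_lock(cpu);
    cpu->interrupt_request |= mask;
    cpu_mutex_unlock(cpu);
}

Branching on cpu_mutex_locked() in tcg_handle_interrupt above, rather than calling the helpers unconditionally, presumably keeps the read of old_mask and the OR of mask inside a single critical section instead of two separate lock/unlock pairs.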
@@ -2392,7 +2392,7 @@ void dump_opcount_info(void)
 void cpu_interrupt(CPUState *cpu, int mask)
 {
     g_assert(qemu_mutex_iothread_locked());
-    cpu->interrupt_request |= mask;
+    cpu_interrupt_request_or(cpu, mask);
     atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
 }
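After the conversion, cpu_interrupt() still asserts the BQL, as the unchanged g_assert() shows; only the field access goes through the accessor, which takes the per-CPU lock internally. A hypothetical caller, for illustration only (the function name and surrounding context are invented):

/* Hypothetical example: raise a hard interrupt on a vCPU from iothread code. */
static void example_raise_irq(CPUState *cpu)
{
    qemu_mutex_lock_iothread();
    cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
    qemu_mutex_unlock_iothread();
}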