@@ -425,7 +425,7 @@ static int hax_vcpu_interrupt(CPUArchState *env)
         irq = cpu_get_pic_interrupt(env);
         if (irq >= 0) {
             hax_inject_interrupt(env, irq);
-            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
         }
     }
 
@@ -473,7 +473,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
     }
     if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
-        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
         apic_poll_irq(x86_cpu->apic_state);
     }
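
Every hunk in this series makes the same mechanical substitution: an open-coded read-modify-write of cpu->interrupt_request becomes a call to the existing cpu_reset_interrupt() helper. For reference, a sketch of that helper as it appears in QEMU trees of this vintage (the exact body may differ; the point is that the clear happens under the big QEMU lock, taken on demand when the caller does not already hold it):

    void cpu_reset_interrupt(CPUState *cpu, int mask)
    {
        bool need_lock = !qemu_mutex_iothread_locked();

        if (need_lock) {
            qemu_mutex_lock_iothread();
        }
        cpu->interrupt_request &= ~mask;   /* the RMW the hunks used to open-code */
        if (need_lock) {
            qemu_mutex_unlock_iothread();
        }
    }

Routing every clear through one helper keeps the locking discipline in a single place rather than at each accelerator call site.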
@@ -402,7 +402,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
     if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
         if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
-            cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_NMI);
             info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
             wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
         } else {
@@ -414,7 +414,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
         (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
         (EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
         int line = cpu_get_pic_interrupt(&x86cpu->env);
-        cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
+        cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_HARD);
         if (line >= 0) {
             wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
                   VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
@@ -440,7 +440,7 @@ int hvf_process_events(CPUState *cpu_state)
     }
     if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
-        cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_POLL);
         apic_poll_irq(cpu->apic_state);
     }
     if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -453,7 +453,7 @@ int hvf_process_events(CPUState *cpu_state)
         do_cpu_sipi(cpu);
     }
     if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
-        cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
+        cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_TPR);
         hvf_cpu_synchronize_state(cpu_state);
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                       env->tpr_access_type);
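
The check-clear-poll sequence for CPU_INTERRUPT_POLL seen in hax_vcpu_hax_exec() above and hvf_process_events() here recurs almost verbatim in the KVM, TCG, and WHPX hunks below. A possible follow-up, sketched with an invented helper name and not part of this patch, could factor it out:

    /* Hypothetical helper (name invented here): acknowledge a pending
     * CPU_INTERRUPT_POLL and let the APIC deliver whatever is pending. */
    static void x86_cpu_handle_apic_poll(X86CPU *x86_cpu)
    {
        CPUState *cs = CPU(x86_cpu);

        if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
            apic_poll_irq(x86_cpu->apic_state);
        }
    }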
@@ -3632,7 +3632,7 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
              */
             events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
             events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
-            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
+            cpu_reset_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
         } else {
             /* Keep these in cs->interrupt_request. */
             events.smi.pending = 0;
@@ -3995,7 +3995,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
         if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
             qemu_mutex_lock_iothread();
-            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
             qemu_mutex_unlock_iothread();
             DPRINTF("injected NMI\n");
             ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
@@ -4006,7 +4006,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
         }
         if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
             qemu_mutex_lock_iothread();
-            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
             qemu_mutex_unlock_iothread();
             DPRINTF("injected SMI\n");
             ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
@@ -4042,7 +4042,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
             (env->eflags & IF_MASK)) {
             int irq;
 
-            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
             irq = cpu_get_pic_interrupt(env);
             if (irq >= 0) {
                 struct kvm_interrupt intr;
@@ -4113,7 +4113,7 @@ int kvm_arch_process_async_events(CPUState *cs)
         /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
         assert(env->mcg_cap);
 
-        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_MCE);
 
         kvm_cpu_synchronize_state(cs);
@@ -4143,7 +4143,7 @@ int kvm_arch_process_async_events(CPUState *cs)
     }
     if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
-        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
         apic_poll_irq(cpu->apic_state);
     }
     if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -4156,7 +4156,7 @@ int kvm_arch_process_async_events(CPUState *cs)
         do_cpu_sipi(cpu);
     }
     if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
-        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR);
         kvm_cpu_synchronize_state(cs);
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                       env->tpr_access_type);
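
Note that kvm_arch_pre_run() keeps its explicit qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() pair around the NMI and SMI clears even though cpu_reset_interrupt() can take the lock itself. That is harmless, just redundant; assuming the lock-aware helper sketched earlier, a later cleanup could presumably shrink each of those sites to something like:

    if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
        /* cpu_reset_interrupt() takes the BQL on its own when needed */
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
        DPRINTF("injected NMI\n");
        ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
    }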
@@ -1332,7 +1332,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     switch (interrupt_request) {
 #if !defined(CONFIG_USER_ONLY)
     case CPU_INTERRUPT_POLL:
-        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
         apic_poll_irq(cpu->apic_state);
         break;
 #endif
@@ -1341,23 +1341,22 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
         break;
     case CPU_INTERRUPT_SMI:
         cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
-        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_SMI);
         do_smm_enter(cpu);
         break;
     case CPU_INTERRUPT_NMI:
         cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
-        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_NMI);
         env->hflags2 |= HF2_NMI_MASK;
         do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
         break;
     case CPU_INTERRUPT_MCE:
-        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_MCE);
         do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
         break;
     case CPU_INTERRUPT_HARD:
         cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
-        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
-                                   CPU_INTERRUPT_VIRQ);
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
         intno = cpu_get_pic_interrupt(env);
         qemu_log_mask(CPU_LOG_TB_IN_ASM,
                       "Servicing hardware INT=0x%02x\n", intno);
@@ -1372,7 +1371,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
         qemu_log_mask(CPU_LOG_TB_IN_ASM,
                       "Servicing virtual hardware INT=0x%02x\n", intno);
         do_interrupt_x86_hardirq(env, intno, 1);
-        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
         break;
 #endif
     }
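
One detail worth noting in the CPU_INTERRUPT_HARD case above: the mask argument is plain OR-ed bits, so what used to be a two-line clear collapses into a single call that drops both flags in one critical section:

    cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);

The behavior is unchanged; the old code already cleared the pending virtual-IRQ bit together with the hard-interrupt bit when servicing a hardware interrupt.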
@@ -700,7 +700,7 @@ void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
     env->hflags &= ~HF_GUEST_MASK;
     env->intercept = 0;
     env->intercept_exceptions = 0;
-    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+    cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
     env->tsc_offset = 0;
 
     env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
@@ -790,14 +790,14 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
     if (!vcpu->interruption_pending &&
         cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
         if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
             vcpu->interruptable = false;
             new_int.InterruptionType = WHvX64PendingNmi;
             new_int.InterruptionPending = 1;
             new_int.InterruptionVector = 2;
         }
         if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
         }
     }
@@ -820,7 +820,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
         vcpu->interruptable && (env->eflags & IF_MASK)) {
         assert(!new_int.InterruptionPending);
         if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
-            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+            cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
             irq = cpu_get_pic_interrupt(env);
             if (irq >= 0) {
                 new_int.InterruptionType = WHvX64PendingInterrupt;
@@ -911,7 +911,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
     }
     if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
-        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+        cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
         apic_poll_irq(x86_cpu->apic_state);
     }
@@ -927,7 +927,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
     }
     if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
-        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
+        cpu_reset_interrupt(cpu, CPU_INTERRUPT_TPR);
         whpx_cpu_synchronize_state(cpu);
         apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
                                       env->tpr_access_type);
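
After these hunks, the HAX, HVF, KVM, and WHPX accelerators, the TCG interrupt handler, and the SVM vmexit path all clear interrupt_request bits the same way, mirroring how the bits are set in the first place. A minimal illustration of the pairing, using the public CPU API (illustrative only, not part of the patch):

    /* producer side, e.g. a device model or timer, runs under the BQL */
    cpu_interrupt(cs, CPU_INTERRUPT_HARD);

    /* consumer side, the vCPU loop, acknowledges it as in the hunks above */
    cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);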