@@ -262,7 +262,7 @@ static void update_apic_tpr(CPUState *cpu)

static void hvf_handle_interrupt(CPUState * cpu, int mask)
{
- cpu->interrupt_request |= mask;
+ cpu_interrupt_request_or(cpu, mask);
if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu);
}
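For orientation, and not part of the diff itself: the hunk above and the hunks below replace direct uses of cpu->interrupt_request with the cpu_interrupt_request()/cpu_interrupt_request_or() helpers. The following is a minimal sketch of what such accessors are assumed to look like; the CPUState stand-in and the helper bodies are illustrative assumptions, the point being only that every read and OR-update of the field is funneled through one place, so how the field is synchronized can later be changed without touching call sites such as hvf_handle_interrupt().

/* Illustrative sketch only: a stand-in CPUState and assumed helper bodies. */
#include <stdint.h>

typedef struct CPUState {
    uint32_t interrupt_request;     /* pending CPU_INTERRUPT_* bits */
    /* ... the real structure has many more fields ... */
} CPUState;

/* Read the pending-interrupt mask through a single accessor. */
static inline uint32_t cpu_interrupt_request(CPUState *cpu)
{
    return cpu->interrupt_request;
}

/* OR new request bits into the mask through the same choke point. */
static inline void cpu_interrupt_request_or(CPUState *cpu, uint32_t mask)
{
    cpu->interrupt_request |= mask;
}

With that in place, the remaining hunks are mechanical: each cpu->interrupt_request read becomes a cpu_interrupt_request() call, cached in a local where several bits are tested against the same snapshot.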
@@ -737,10 +737,12 @@ int hvf_vcpu_exec(CPUState *cpu)
ret = 0;
switch (exit_reason) {
case EXIT_REASON_HLT: {
+ uint32_t interrupt_request = cpu_interrupt_request(cpu);
+
macvm_set_rip(cpu, rip + ins_len);
- if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!((interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK))
- && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
+ && !(interrupt_request & CPU_INTERRUPT_NMI) &&
!(idtvec_info & VMCS_IDT_VEC_VALID)) {
cpu_halted_set(cpu, 1);
ret = EXCP_HLT;
@@ -352,6 +352,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)

uint8_t vector;
uint64_t intr_type;
+ uint32_t interrupt_request;
bool have_event = true;
if (env->interrupt_injected != -1) {
vector = env->interrupt_injected;
@@ -400,7 +401,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
};
}

- if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_NMI) {
if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_NMI);
info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
@@ -411,7 +412,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
}

if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
- (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
int line = cpu_get_pic_interrupt(&x86cpu->env);
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_HARD);
@@ -420,39 +421,42 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
}
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_HARD) {
vmx_set_int_window_exiting(cpu_state);
}
- return (cpu_state->interrupt_request
- & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
+ interrupt_request = cpu_interrupt_request(cpu_state);
+ return interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR);
}

int hvf_process_events(CPUState *cpu_state)
{
X86CPU *cpu = X86_CPU(cpu_state);
CPUX86State *env = &cpu->env;
+ uint32_t interrupt_request;
env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
- if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_INIT) {
hvf_cpu_synchronize_state(cpu_state);
do_cpu_init(cpu);
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_POLL) {
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
}
- if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
+
+ interrupt_request = cpu_interrupt_request(cpu_state);
+ if (((interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
- (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
+ (interrupt_request & CPU_INTERRUPT_NMI)) {
cpu_halted_set(cpu_state, 0);
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
+ if (interrupt_request & CPU_INTERRUPT_SIPI) {
hvf_cpu_synchronize_state(cpu_state);
do_cpu_sipi(cpu);
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_TPR) {
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_TPR);
hvf_cpu_synchronize_state(cpu_state);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,