@@ -62,8 +62,6 @@ extern HVFState *hvf_state;
struct hvf_vcpu_state {
uint64_t fd;
void *exit;
- struct timespec ts;
- bool sleeping;
};
void assert_hvf_ok(hv_return_t ret);
@@ -320,18 +320,7 @@ int hvf_arch_init_vcpu(CPUState *cpu)
void hvf_kick_vcpu_thread(CPUState *cpu)
{
- if (cpu->hvf->sleeping) {
- /*
- * When sleeping, make sure we always send signals. Also, clear the
- * timespec, so that an IPI that arrives between setting hvf->sleeping
- * and the nanosleep syscall still aborts the sleep.
- */
- cpu->thread_kicked = false;
- cpu->hvf->ts = (struct timespec){ };
- cpus_kick_thread(cpu);
- } else {
- hv_vcpus_exit(&cpu->hvf->fd, 1);
- }
+ hv_vcpus_exit(&cpu->hvf->fd, 1);
}
static int hvf_inject_interrupts(CPUState *cpu)
@@ -355,17 +344,11 @@ int hvf_vcpu_exec(CPUState *cpu)
CPUARMState *env = &arm_cpu->env;
hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
hv_return_t r;
- int ret = 0;
-
- qemu_mutex_unlock_iothread();
- do {
+ while (1) {
bool advance_pc = false;
- qemu_mutex_lock_iothread();
- current_cpu = cpu;
qemu_wait_io_event_common(cpu);
- qemu_mutex_unlock_iothread();
flush_cpu_state(cpu);
@@ -374,10 +357,10 @@ int hvf_vcpu_exec(CPUState *cpu)
}
if (cpu->halted) {
- qemu_mutex_lock_iothread();
return EXCP_HLT;
}
+ qemu_mutex_unlock_iothread();
assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));
/* handle VMEXIT */
@@ -385,15 +368,13 @@ int hvf_vcpu_exec(CPUState *cpu)
uint64_t syndrome = hvf_exit->exception.syndrome;
uint32_t ec = syn_get_ec(syndrome);
+ qemu_mutex_lock_iothread();
switch (exit_reason) {
case HV_EXIT_REASON_EXCEPTION:
/* This is the main one, handle below. */
break;
case HV_EXIT_REASON_VTIMER_ACTIVATED:
- qemu_mutex_lock_iothread();
- current_cpu = cpu;
qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
- qemu_mutex_unlock_iothread();
continue;
case HV_EXIT_REASON_CANCELED:
/* we got kicked, no exit to process */
@@ -402,7 +383,6 @@ int hvf_vcpu_exec(CPUState *cpu)
assert(0);
}
- ret = 0;
switch (ec) {
case EC_DATAABORT: {
bool isv = syndrome & ARM_EL_ISV;
@@ -413,9 +393,6 @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t srt = (syndrome >> 16) & 0x1f;
uint64_t val = 0;
- qemu_mutex_lock_iothread();
- current_cpu = cpu;
-
DPRINTF("data abort: [pc=0x%llx va=0x%016llx pa=0x%016llx isv=%x "
"iswrite=%x s1ptw=%x len=%d srt=%d]\n",
env->pc, hvf_exit->exception.virtual_address,
@@ -446,8 +423,6 @@ int hvf_vcpu_exec(CPUState *cpu)
hvf_set_reg(cpu, srt, val);
}
- qemu_mutex_unlock_iothread();
-
advance_pc = true;
break;
}
@@ -491,83 +466,18 @@ int hvf_vcpu_exec(CPUState *cpu)
break;
}
case EC_WFX_TRAP:
- if (!(syndrome & WFX_IS_WFE) && !(cpu->interrupt_request &
- (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ))) {
- uint64_t cval, ctl, val, diff, now;
-
- /* Set up a local timer for vtimer if necessary ... */
- r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
- assert_hvf_ok(r);
- r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
- assert_hvf_ok(r);
-
- asm volatile("mrs %0, cntvct_el0" : "=r"(val));
- diff = cval - val;
-
- now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
- gt_cntfrq_period_ns(arm_cpu);
-
- /* Timer disabled or masked, just wait for long */
- if (!(ctl & 1) || (ctl & 2)) {
- diff = (120 * NANOSECONDS_PER_SECOND) /
- gt_cntfrq_period_ns(arm_cpu);
- }
-
- if (diff < INT64_MAX) {
- uint64_t ns = diff * gt_cntfrq_period_ns(arm_cpu);
- struct timespec *ts = &cpu->hvf->ts;
-
- *ts = (struct timespec){
- .tv_sec = ns / NANOSECONDS_PER_SECOND,
- .tv_nsec = ns % NANOSECONDS_PER_SECOND,
- };
-
- /*
- * Waking up easily takes 1ms, don't go to sleep for smaller
- * time periods than 2ms.
- */
- if (!ts->tv_sec && (ts->tv_nsec < (SCALE_MS * 2))) {
- advance_pc = true;
- break;
- }
-
- /* Set cpu->hvf->sleeping so that we get a SIG_IPI signal. */
- cpu->hvf->sleeping = true;
- smp_mb();
-
- /* Bail out if we received an IRQ meanwhile */
- if (cpu->thread_kicked || (cpu->interrupt_request &
- (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ))) {
- cpu->hvf->sleeping = false;
- break;
- }
-
- /* nanosleep returns on signal, so we wake up on kick. */
- nanosleep(ts, NULL);
-
- /* Out of sleep - either naturally or because of a kick */
- cpu->hvf->sleeping = false;
- }
-
- advance_pc = true;
- }
break;
case EC_AA64_HVC:
cpu_synchronize_state(cpu);
- qemu_mutex_lock_iothread();
- current_cpu = cpu;
if (arm_is_psci_call(arm_cpu, EXCP_HVC)) {
arm_handle_psci_call(arm_cpu);
} else {
DPRINTF("unknown HVC! %016llx", env->xregs[0]);
env->xregs[0] = -1;
}
- qemu_mutex_unlock_iothread();
break;
case EC_AA64_SMC:
cpu_synchronize_state(cpu);
- qemu_mutex_lock_iothread();
- current_cpu = cpu;
if (arm_is_psci_call(arm_cpu, EXCP_SMC)) {
arm_handle_psci_call(arm_cpu);
} else {
@@ -575,7 +485,6 @@ int hvf_vcpu_exec(CPUState *cpu)
env->xregs[0] = -1;
env->pc += 4;
}
- qemu_mutex_unlock_iothread();
break;
default:
cpu_synchronize_state(cpu);
@@ -594,10 +503,5 @@ int hvf_vcpu_exec(CPUState *cpu)
r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
assert_hvf_ok(r);
}
- } while (ret == 0);
-
- qemu_mutex_lock_iothread();
- current_cpu = cpu;
-
- return ret;
+ }
}
- Stop setting current_cpu
- Remove the previous WFx handler
- Simplify locking
- Remove the unused ret variable in hvf_vcpu_exec

Signed-off-by: Peter Collingbourne <pcc@google.com>
---
 include/sysemu/hvf_int.h |   2 -
 target/arm/hvf/hvf.c     | 106 ++-------------------------------------
 2 files changed, 5 insertions(+), 103 deletions(-)