@@ -65,6 +65,7 @@ struct KVMState
int broken_set_mem_region;
int migration_log;
int vcpu_events;
+ int robust_singlestep;
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
@@ -659,6 +660,12 @@ int kvm_init(int smp_cpus)
s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif
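+ /* cache whether the kernel keeps guest single-stepping (flags.TF) intact
+ * across register updates; see kvm_guest_debug_workarounds() */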
+ s->robust_singlestep = 0;
+#ifdef KVM_CAP_X86_ROBUST_SINGLESTEP
+ s->robust_singlestep =
+ kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
+#endif
+
ret = kvm_arch_init(s, smp_cpus);
if (ret < 0)
goto err;
@@ -917,6 +924,11 @@ int kvm_has_vcpu_events(void)
return kvm_state->vcpu_events;
}
+int kvm_has_robust_singlestep(void)
+{
+ return kvm_state->robust_singlestep;
+}
+
void kvm_setup_guest_memory(void *start, size_t size)
{
if (!kvm_has_sync_mmu()) {
@@ -974,10 +986,6 @@ static void kvm_invoke_set_guest_debug(void *data)
struct kvm_set_guest_debug_data *dbg_data = data;
CPUState *env = dbg_data->env;
- if (env->kvm_vcpu_dirty) {
- kvm_arch_put_registers(env);
- env->kvm_vcpu_dirty = 0;
- }
dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}
@@ -985,12 +993,12 @@ int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
struct kvm_set_guest_debug_data data;
- data.dbg.control = 0;
- if (env->singlestep_enabled)
- data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
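+ /* reinject_trap carries KVM_GUESTDBG_INJECT_DB/BP when a pending debug
+ * trap has to be re-delivered to the guest */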
+ data.dbg.control = reinject_trap;
+ if (env->singlestep_enabled) {
+ data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
+ }
kvm_arch_update_guest_debug(env, &data.dbg);
- data.dbg.control |= reinject_trap;
data.env = env;
on_vcpu(env, kvm_invoke_set_guest_debug, &data);
@@ -40,6 +40,7 @@ int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size);
int kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
+int kvm_has_robust_singlestep(void);
void kvm_setup_guest_memory(void *start, size_t size);
@@ -851,6 +851,37 @@ static int kvm_get_vcpu_events(CPUState *env)
return 0;
}
+static int kvm_guest_debug_workarounds(CPUState *env)
+{
+ int ret = 0;
+#ifdef KVM_CAP_SET_GUEST_DEBUG
+ unsigned long reinject_trap = 0;
+
+ if (!kvm_has_vcpu_events()) {
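+ /* exception vector 1 is #DB, vector 3 is #BP */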
+ if (env->exception_injected == 1) {
+ reinject_trap = KVM_GUESTDBG_INJECT_DB;
+ } else if (env->exception_injected == 3) {
+ reinject_trap = KVM_GUESTDBG_INJECT_BP;
+ }
+ env->exception_injected = -1;
+ }
+
+ /*
+ * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
+ * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
+ * by updating the debug state once again if single-stepping is on.
+ * Another reason to call kvm_update_guest_debug here is a pending debug
+ * trap raised by the guest. On kernels without SET_VCPU_EVENTS, such traps
+ * have to be reinjected via SET_GUEST_DEBUG.
+ */
+ if (reinject_trap ||
+ (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
+ ret = kvm_update_guest_debug(env, reinject_trap);
+ }
+#endif /* KVM_CAP_SET_GUEST_DEBUG */
+ return ret;
+}
+
int kvm_arch_put_registers(CPUState *env)
{
int ret;
@@ -879,6 +910,11 @@ int kvm_arch_put_registers(CPUState *env)
if (ret < 0)
return ret;
+ /* must be last: SET_GUEST_DEBUG state must not be clobbered by later register writes */
+ ret = kvm_guest_debug_workarounds(env);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -1122,10 +1158,13 @@ int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
} else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
handle = 1;
- if (!handle)
- kvm_update_guest_debug(cpu_single_env,
- (arch_info->exception == 1) ?
- KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);
+ if (!handle) {
+ cpu_synchronize_state(cpu_single_env);
+ assert(cpu_single_env->exception_injected == -1);
+
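+ /* queue the unhandled #DB/#BP; it gets reinjected into the guest on
+ * the next vcpu state write-back */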
+ cpu_single_env->exception_injected = arch_info->exception;
+ cpu_single_env->has_error_code = 0;
+ }
return handle;
}