@@ -362,8 +362,11 @@ struct kvm_vcpu_arch {
u64 *mce_banks;
/* used for guest single stepping over the given code position */
+ bool singlestep_pending;
u16 singlestep_cs;
+ u16 singlestep_pending_cs;
unsigned long singlestep_rip;
+ unsigned long singlestep_pending_rip;
/* fields used by HYPER-V emulation */
u64 hv_vapic;
};
@@ -820,4 +823,6 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+int kvm_check_guest_singlestep(struct kvm_vcpu *vcpu);
+
#endif /* _ASM_X86_KVM_HOST_H */
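
For context: the new singlestep_pending/singlestep_pending_cs/singlestep_pending_rip fields only come into play once user space has armed single-stepping via KVM_SET_GUEST_DEBUG, which is what makes the KVM_GUESTDBG_SINGLESTEP test in the queue_singlestep() helper added below evaluate true. A minimal user-space sketch of that setup (not part of this patch; vcpu_fd is assumed to be an already-open vcpu file descriptor, and enable_single_step is a hypothetical helper name):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_single_step(int vcpu_fd)
{
        struct kvm_guest_debug dbg;

        /* KVM_GUESTDBG_ENABLE activates guest debugging; adding
         * KVM_GUESTDBG_SINGLESTEP asks KVM to exit to user space after
         * each guest instruction. */
        memset(&dbg, 0, sizeof(dbg));
        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
        return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}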
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3489,6 +3489,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
goto out;
if (need_resched())
schedule();
+
+ if (unlikely(vcpu->arch.singlestep_pending)) {
+ ret = kvm_check_guest_singlestep(vcpu);
+ if (ret == 0)
+ goto out;
+ }
}
vmx->emulation_required = 0;
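
The hunk above covers the VMX invalid-guest-state emulation loop, which keeps calling the emulator without returning through __vcpu_run(); a single step recorded by queue_singlestep() therefore has to be checked here as well, otherwise the KVM_EXIT_DEBUG exit would be delayed until the loop finally completes.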
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3441,6 +3441,27 @@ static void cache_all_regs(struct kvm_vcpu *vcpu)
vcpu->arch.regs_dirty = ~0;
}
+static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
+{
+ struct kvm_segment kvm_seg;
+
+ kvm_get_segment(vcpu, &kvm_seg, seg);
+ return kvm_seg.selector;
+}
+
+static void queue_singlestep(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+ vcpu->arch.singlestep_pending = true;
+ vcpu->arch.singlestep_pending_cs =
+ get_segment_selector(vcpu, VCPU_SREG_CS);
+ vcpu->arch.singlestep_pending_rip = kvm_rip_read(vcpu);
+ } else {
+ vcpu->arch.dr6 |= DR6_BS;
+ kvm_queue_exception(vcpu, DB_VECTOR);
+ }
+}
+
int emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
u16 error_code,
@@ -3449,6 +3470,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
int r, shadow_mask;
struct decode_cache *c;
struct kvm_run *run = vcpu->run;
+ bool singlestep;
kvm_clear_exception_queue(vcpu);
vcpu->arch.mmio_fault_cr2 = cr2;
@@ -3515,8 +3537,12 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
}
}
+ singlestep = vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_TF;
+
if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
+ if (singlestep)
+ queue_singlestep(vcpu);
return EMULATE_DONE;
}
@@ -3549,6 +3575,9 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+ if (singlestep)
+ queue_singlestep(vcpu);
+
if (vcpu->mmio_is_write) {
vcpu->mmio_needed = 0;
return EMULATE_DO_MMIO;
@@ -4450,6 +4479,26 @@ out:
return r;
}
+int kvm_check_guest_singlestep(struct kvm_vcpu *vcpu)
+{
+ unsigned long rip = kvm_rip_read(vcpu);
+
+ vcpu->arch.singlestep_pending = false;
+
+ if (vcpu->arch.singlestep_pending_cs !=
+ get_segment_selector(vcpu, VCPU_SREG_CS) ||
+ vcpu->arch.singlestep_pending_rip != rip)
+ return 1;
+
+ vcpu->run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1;
+ vcpu->run->debug.arch.dr7 = 0;
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ vcpu->run->debug.arch.pc = get_segment_base(vcpu, VCPU_SREG_CS) + rip;
+ vcpu->run->debug.arch.exception = DB_VECTOR;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_check_guest_singlestep);
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
@@ -4471,6 +4520,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
r = 1;
while (r > 0) {
+ if (unlikely(vcpu->arch.singlestep_pending)) {
+ r = kvm_check_guest_singlestep(vcpu);
+ if (r == 0)
+ break;
+ }
+
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
r = vcpu_enter_guest(vcpu);
else {
@@ -4828,14 +4883,6 @@ static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
}
-static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
-{
- struct kvm_segment kvm_seg;
-
- kvm_get_segment(vcpu, &kvm_seg, seg);
- return kvm_seg.selector;
-}
-
static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
struct kvm_segment segvar = {
@@ -5607,6 +5654,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
vcpu->arch.dr6 = DR6_FIXED_1;
vcpu->arch.dr7 = DR7_FIXED_1;
+ vcpu->arch.singlestep_pending = false;
+
return kvm_x86_ops->vcpu_reset(vcpu);
}
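
On the user-space side, the KVM_EXIT_DEBUG exit prepared by kvm_check_guest_singlestep() is reported through the mmap'ed kvm_run structure. A minimal sketch of a consumer (not part of this patch; vcpu_fd and run are assumed to be set up elsewhere by the VMM, and run_until_step is a hypothetical helper name):

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int run_until_step(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        return -1;

                if (run->exit_reason == KVM_EXIT_DEBUG) {
                        /* pc is CS base + RIP as filled in by
                         * kvm_check_guest_singlestep(); dr6 has DR6_BS set. */
                        printf("single step at 0x%llx, dr6=0x%llx\n",
                               (unsigned long long)run->debug.arch.pc,
                               (unsigned long long)run->debug.arch.dr6);
                        return 0;
                }
                /* other exit reasons (MMIO, PIO, ...) would be handled here */
        }
}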