@@ -117,6 +117,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
+extern bool kvmppc_crit_inhibited_irq_pending(struct kvm_vcpu *vcpu);
extern bool kvmppc_critical_section(struct kvm_vcpu *vcpu);
extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
@@ -192,6 +192,21 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}
+bool kvmppc_crit_inhibited_irq_pending(struct kvm_vcpu *vcpu)
+{
+ unsigned long *p = &vcpu->arch.pending_exceptions;
+
+ if (!(kvmppc_get_msr(vcpu) & MSR_EE))
+ return false;
+
+ if (test_bit(BOOK3S_IRQPRIO_DECREMENTER, p) ||
+ test_bit(BOOK3S_IRQPRIO_EXTERNAL, p) ||
+ test_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, p))
+ return true;
+
+ return false;
+}
+
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
int deliver = 1;
@@ -342,6 +342,41 @@ static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
#endif
}
+bool kvmppc_crit_inhibited_irq_pending(struct kvm_vcpu *vcpu)
+{
+ unsigned long *p = &vcpu->arch.pending_exceptions;
+ bool ee = !!(kvmppc_get_msr(vcpu) & MSR_EE);
+ bool ce = !!(kvmppc_get_msr(vcpu) & MSR_CE);
+ bool me = !!(kvmppc_get_msr(vcpu) & MSR_ME);
+ bool de = !!(kvmppc_get_msr(vcpu) & MSR_DE);
+
+ if (ee) {
+ if (test_bit(BOOKE_IRQPRIO_EXTERNAL, p) ||
+ test_bit(BOOKE_IRQPRIO_DBELL, p))
+ return true;
+ }
+
+ if (ce) {
+ if (test_bit(BOOKE_IRQPRIO_WATCHDOG, p) ||
+ test_bit(BOOKE_IRQPRIO_CRITICAL, p) ||
+ test_bit(BOOKE_IRQPRIO_DBELL_CRIT, p))
+ return true;
+ }
+
+ if (me) {
+ if (test_bit(BOOKE_IRQPRIO_MACHINE_CHECK, p))
+ return true;
+ }
+
+ if (de) {
+ if (test_bit(BOOKE_IRQPRIO_DEBUG, p))
+ return true;
+ }
+
+ return false;
+}
+
+
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
unsigned int priority)
@@ -83,6 +83,20 @@ bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
return crit;
}
+static bool kvmppc_needs_emulation(struct kvm_vcpu *vcpu)
+{
+	/* XXX disable emulation for now, until we have implemented everything */
+ if (true)
+ return false;
+
+ /* We're in a critical section, but an interrupt is pending */
+ if (kvmppc_critical_section(vcpu) &&
+ kvmppc_crit_inhibited_irq_pending(vcpu))
+ return true;
+
+ return false;
+}
+
/*
* Common checks before entering the guest world. Call with interrupts
* disabled.
@@ -141,6 +155,15 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
continue;
}
+ if (kvmppc_needs_emulation(vcpu)) {
+ /* Emulate one instruction, then try again */
+ local_irq_enable();
+ vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
+ kvmppc_emulate_any_instruction(vcpu);
+ hard_irq_disable();
+ continue;
+ }
+
kvm_guest_enter();
return 1;
}
Usually the idea behind critical sections is that we don't ever trap in them. However, we may not always be that lucky. When we do hit a critical section while an interrupt is pending, we need to make sure we can inject the interrupt right when the critical section is over. To achieve this, we just emulate every single instruction inside the critical section until we're out of it. Note: For now we don't trigger this code path until we properly emulate all the instructions necessary to run Linux guests well again. Signed-off-by: Alexander Graf <agraf@suse.de> --- arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s.c | 15 +++++++++++++++ arch/powerpc/kvm/booke.c | 35 +++++++++++++++++++++++++++++++++++ arch/powerpc/kvm/powerpc.c | 23 +++++++++++++++++++++++ 4 files changed, 74 insertions(+)