Message ID | 1389316720-15603-1-git-send-email-scottwood@freescale.com (mailing list archive) |
---|---|
State | New, archived |
On 10.01.2014, at 02:18, Scott Wood <scottwood@freescale.com> wrote:

> Simplify the handling of lazy EE by going directly from fully-enabled
> to hard-disabled. This replaces the lazy_irq_pending() check
> (including its misplaced kvm_guest_exit() call).
>
> As suggested by Tiejun Chen, move the interrupt disabling into
> kvmppc_prepare_to_enter() rather than have each caller do it. Also
> move the IRQ enabling on heavyweight exit into
> kvmppc_prepare_to_enter().
>
> Signed-off-by: Scott Wood <scottwood@freescale.com>

Thanks, applied to kvm-ppc-queue.

Alex
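The patch below touches four files, so the resulting shape of kvmppc_prepare_to_enter() is easiest to see in one piece. The following sketch is a condensed, hypothetical reconstruction of the post-patch flow: the interrupt transitions match the hunks below, but the resched and signal handling is paraphrased from memory rather than copied, and the request re-check is reduced to a comment.

/*
 * Condensed sketch of kvmppc_prepare_to_enter() after this patch
 * (not the literal kernel function).
 */
int kvmppc_prepare_to_enter_sketch(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());	/* caller leaves interrupts fully enabled */
	hard_irq_disable();		/* one step: fully-enabled -> hard-disabled */

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();	/* straight back to hard-disabled */
			continue;
		}

		if (signal_pending(current)) {
			r = -EINTR;		/* heavyweight exit to the host */
			break;
		}

		/* (the real function also re-checks vcpu->requests here) */

		kvm_guest_enter();
		return 1;	/* enter the guest, interrupts still hard-disabled */
	}

	local_irq_enable();	/* IRQ enabling on heavyweight exit now lives here */
	return r;
}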
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index c8317fb..e64881cd 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -455,6 +455,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
 	trace_hardirqs_on();
 
 #ifdef CONFIG_PPC64
+	/*
+	 * To avoid races, the caller must have gone directly from having
+	 * interrupts fully-enabled to hard-disabled.
+	 */
+	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
 	/* Only need to enable IRQs by hard enabling them after this */
 	local_paca->irq_happened = 0;
 	local_paca->soft_enabled = 1;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fe14ca3..ce7accc 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1023,14 +1023,14 @@ program_interrupt:
 		 * and if we really did time things so badly, then we just exit
 		 * again due to a host external interrupt.
 		 */
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = s;
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
+
 		kvmppc_handle_lost_ext(vcpu);
 	}
 
@@ -1277,12 +1277,10 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	 * really did time things so badly, then we just exit again due to
 	 * a host external interrupt.
 	 */
-	local_irq_disable();
 	ret = kvmppc_prepare_to_enter(vcpu);
-	if (ret <= 0) {
-		local_irq_enable();
+	if (ret <= 0)
 		goto out;
-	}
+	/* interrupts now hard-disabled */
 
 	/* Save FPU state in stack */
 	if (current->thread.regs->msr & MSR_FP)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53e65a2..626b431 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 		local_irq_enable();
 		kvm_vcpu_block(vcpu);
 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-		local_irq_disable();
+		hard_irq_disable();
 
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
 		r = 1;
@@ -692,13 +692,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return -EINVAL;
 	}
 
-	local_irq_disable();
 	s = kvmppc_prepare_to_enter(vcpu);
 	if (s <= 0) {
-		local_irq_enable();
 		ret = s;
 		goto out;
 	}
+	/* interrupts now hard-disabled */
 
 #ifdef CONFIG_PPC_FPU
 	/* Save userspace FPU state in stack */
@@ -1217,12 +1216,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * aren't already exiting to userspace for some other reason.
 	 */
 	if (!(r & RESUME_HOST)) {
-		local_irq_disable();
 		s = kvmppc_prepare_to_enter(vcpu);
-		if (s <= 0) {
-			local_irq_enable();
+		if (s <= 0)
 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-		} else {
+		else {
+			/* interrupts now hard-disabled */
 			kvmppc_fix_ee_before_entry();
 		}
 	}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9ae9768..ea65029 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
  */
 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
-	int r = 1;
+	int r;
+
+	WARN_ON(irqs_disabled());
+	hard_irq_disable();
 
-	WARN_ON_ONCE(!irqs_disabled());
 	while (true) {
 		if (need_resched()) {
 			local_irq_enable();
 			cond_resched();
-			local_irq_disable();
+			hard_irq_disable();
 			continue;
 		}
 
@@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			local_irq_enable();
 			trace_kvm_check_requests(vcpu);
 			r = kvmppc_core_check_requests(vcpu);
-			local_irq_disable();
+			hard_irq_disable();
 			if (r > 0)
 				continue;
 			break;
@@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 			continue;
 		}
 
-#ifdef CONFIG_PPC64
-		/* lazy EE magic */
-		hard_irq_disable();
-		if (lazy_irq_pending()) {
-			/* Got an interrupt in between, try again */
-			local_irq_enable();
-			local_irq_disable();
-			kvm_guest_exit();
-			continue;
-		}
-#endif
-
 		kvm_guest_enter();
-		break;
+		return 1;
 	}
 
+	/* return to host */
+	local_irq_enable();
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
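A side note on the new WARN_ON() in kvmppc_fix_ee_before_entry(): on 64-bit, hard_irq_disable() both clears MSR[EE] and records the fact in the paca's lazy-EE bookkeeping, so if the caller really came straight from the fully-enabled state, irq_happened can only contain PACA_IRQ_HARD_DIS. The sketch below spells that reasoning out; the field names and the check are taken from the hunk above, but the outline of hard_irq_disable() in the comment is an assumption about the ppc64 implementation of that era, not a verified copy.

#ifdef CONFIG_PPC64
/*
 * Why the WARN_ON() is expected to hold (sketch; the outline of
 * hard_irq_disable() below is an assumption, not copied from the kernel):
 *
 *   hard_irq_disable() roughly does
 *       __hard_irq_disable();                           clear MSR[EE]
 *       local_paca->soft_enabled = 0;                   soft-disable too
 *       local_paca->irq_happened |= PACA_IRQ_HARD_DIS;  remember it
 *
 * Coming straight from the fully-enabled state, irq_happened was 0, so it
 * ends up exactly PACA_IRQ_HARD_DIS.  Passing through a soft-disabled
 * window first could let an interrupt set extra PACA_IRQ_* bits, which is
 * the race the old lazy_irq_pending() retry had to handle.
 */
static inline void kvmppc_fix_ee_before_entry_sketch(void)	/* hypothetical name */
{
	trace_hardirqs_on();

	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
}
#endif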
Simplify the handling of lazy EE by going directly from fully-enabled
to hard-disabled. This replaces the lazy_irq_pending() check
(including its misplaced kvm_guest_exit() call).

As suggested by Tiejun Chen, move the interrupt disabling into
kvmppc_prepare_to_enter() rather than have each caller do it. Also
move the IRQ enabling on heavyweight exit into
kvmppc_prepare_to_enter().

Signed-off-by: Scott Wood <scottwood@freescale.com>
---
v2:
 - rebased
 - removed redundant (on 64-bit) and possibly performance-affecting WARNs
 - addressed cosmetic feedback

 arch/powerpc/include/asm/kvm_ppc.h |  6 ++++++
 arch/powerpc/kvm/book3s_pr.c       | 14 ++++++--------
 arch/powerpc/kvm/booke.c           | 12 +++++-------
 arch/powerpc/kvm/powerpc.c         | 26 +++++++++-----------------
 4 files changed, 26 insertions(+), 32 deletions(-)
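For completeness, a call site after this patch reduces to the pattern below. This is a simplified, hypothetical composite of the kvmppc_vcpu_run-style callers in the diff (FPU and debug state handling omitted, and the actual guest entry replaced by a placeholder), not any one function verbatim.

/* Simplified, hypothetical composite of a post-patch call site. */
static int vcpu_run_sketch(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;

	/* No local_irq_disable() here any more; prepare_to_enter hard-disables. */
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		/* heavyweight exit: interrupts were already re-enabled inside */
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */
	kvmppc_fix_ee_before_entry();	/* fix up lazy-EE state before entry */

	/* ... low-level guest entry and exit handling would go here ... */
	ret = 0;			/* placeholder for the real exit code */

out:
	return ret;
}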