@@ -5226,86 +5226,82 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (unlikely(req_int_win))
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 
-	if (!vcpu->requests)
-		goto no_requests;
-
-	for_each_set_bit(req, &vcpu->requests, BITS_PER_LONG) {
-		clear_bit(req, &vcpu->requests);
-		switch (req) {
-		case KVM_REQ_MMU_RELOAD:
-			kvm_mmu_unload(vcpu);
-			r = kvm_mmu_reload(vcpu);
-			if (unlikely(r)) {
-				kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+	if (vcpu->requests)
+		for_each_set_bit(req, &vcpu->requests, BITS_PER_LONG) {
+			clear_bit(req, &vcpu->requests);
+			switch (req) {
+			case KVM_REQ_MMU_RELOAD:
+				kvm_mmu_unload(vcpu);
+				r = kvm_mmu_reload(vcpu);
+				if (unlikely(r)) {
+					kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+					goto out;
+				}
+				break;
+			case KVM_REQ_MIGRATE_TIMER:
+				__kvm_migrate_timers(vcpu);
+				break;
+			case KVM_REQ_CLOCK_UPDATE:
+				r = kvm_guest_time_update(vcpu);
+				if (unlikely(r))
+					goto out;
+				break;
+			case KVM_REQ_MMU_SYNC:
+				kvm_mmu_sync_roots(vcpu);
+				break;
+			case KVM_REQ_TLB_FLUSH:
+				kvm_x86_ops->tlb_flush(vcpu);
+				break;
+			case KVM_REQ_REPORT_TPR_ACCESS:
+				vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
+				r = 0;
 				goto out;
-			}
-			break;
-		case KVM_REQ_MIGRATE_TIMER:
-			__kvm_migrate_timers(vcpu);
-			break;
-		case KVM_REQ_CLOCK_UPDATE:
-			r = kvm_guest_time_update(vcpu);
-			if (unlikely(r))
+			case KVM_REQ_TRIPLE_FAULT:
+				vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+				r = 0;
 				goto out;
-			break;
-		case KVM_REQ_MMU_SYNC:
-			kvm_mmu_sync_roots(vcpu);
-			break;
-		case KVM_REQ_TLB_FLUSH:
-			kvm_x86_ops->tlb_flush(vcpu);
-			break;
-		case KVM_REQ_REPORT_TPR_ACCESS:
-			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
-			r = 0;
-			goto out;
-		case KVM_REQ_TRIPLE_FAULT:
-			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
-			r = 0;
-			goto out;
-		case KVM_REQ_DEACTIVATE_FPU:
-			vcpu->fpu_active = 0;
-			kvm_x86_ops->fpu_deactivate(vcpu);
-			break;
-		case KVM_REQ_APF_HALT:
-			/* Page is swapped out. Do synthetic halt */
-			vcpu->arch.apf.halted = true;
-			r = 1;
-			goto out;
-		case KVM_REQ_STEAL_UPDATE:
-			record_steal_time(vcpu);
-			break;
-		case KVM_REQ_NMI:
-			process_nmi(vcpu);
-			break;
-		case KVM_REQ_IMMEDIATE_EXIT:
-			req_immediate_exit = true;
-			break;
-		case KVM_REQ_PMU:
-			kvm_handle_pmu_event(vcpu);
-			break;
-		case KVM_REQ_PMI:
-			kvm_deliver_pmi(vcpu);
-			break;
-		case KVM_REQ_EVENT:
-			inject_pending_event(vcpu);
-
-			/* enable NMI/IRQ window open exits if needed */
-			if (vcpu->arch.nmi_pending)
-				kvm_x86_ops->enable_nmi_window(vcpu);
-			else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-				kvm_x86_ops->enable_irq_window(vcpu);
-
-			if (kvm_lapic_enabled(vcpu)) {
-				update_cr8_intercept(vcpu);
-				kvm_lapic_sync_to_vapic(vcpu);
+			case KVM_REQ_DEACTIVATE_FPU:
+				vcpu->fpu_active = 0;
+				kvm_x86_ops->fpu_deactivate(vcpu);
+				break;
+			case KVM_REQ_APF_HALT:
+				/* Page is swapped out. Do synthetic halt */
+				vcpu->arch.apf.halted = true;
+				r = 1;
+				goto out;
+			case KVM_REQ_STEAL_UPDATE:
+				record_steal_time(vcpu);
+				break;
+			case KVM_REQ_NMI:
+				process_nmi(vcpu);
+				break;
+			case KVM_REQ_IMMEDIATE_EXIT:
+				req_immediate_exit = true;
+				break;
+			case KVM_REQ_PMU:
+				kvm_handle_pmu_event(vcpu);
+				break;
+			case KVM_REQ_PMI:
+				kvm_deliver_pmi(vcpu);
+				break;
+			case KVM_REQ_EVENT:
+				inject_pending_event(vcpu);
+
+				/* enable NMI/IRQ window open exits if needed */
+				if (vcpu->arch.nmi_pending)
+					kvm_x86_ops->enable_nmi_window(vcpu);
+				else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+					kvm_x86_ops->enable_irq_window(vcpu);
+
+				if (kvm_lapic_enabled(vcpu)) {
+					update_cr8_intercept(vcpu);
+					kvm_lapic_sync_to_vapic(vcpu);
+				}
+				break;
+			default:
+				BUG();
 			}
-			break;
-		default:
-			BUG();
 		}
-	}
-
-no_requests:
 
 	preempt_disable();
A previous patch introduced a goto to make the patch clearer.  This patch
cleans up the code but has no functionality changes.

Signed-off-by: Avi Kivity <avi@redhat.com>
---
 arch/x86/kvm/x86.c | 148 ++++++++++++++++++++++++++---------------------------
 1 file changed, 72 insertions(+), 76 deletions(-)
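For anyone who wants to poke at the resulting control flow outside the kernel
tree, below is a minimal, self-contained userspace sketch of the shape the loop
now has: test the request word once, then pop and dispatch one set bit per
iteration. The REQ_* names and handle_requests() helper are made up for
illustration, and __builtin_ctzl() plus the explicit mask update stand in for
the kernel's for_each_set_bit() and clear_bit():

#include <stdio.h>

/* Hypothetical request bits, mirroring the KVM_REQ_* style. */
enum { REQ_TLB_FLUSH = 0, REQ_CLOCK_UPDATE = 1, REQ_NMI = 2 };

static void handle_requests(unsigned long *requests)
{
	unsigned int req;

	/* Same shape as the patched loop: take the lowest set bit,
	 * clear it, and dispatch via a switch. */
	while (*requests) {
		req = __builtin_ctzl(*requests);	/* index of lowest set bit */
		*requests &= ~(1UL << req);		/* clear_bit() equivalent */
		switch (req) {
		case REQ_TLB_FLUSH:
			puts("flush TLB");
			break;
		case REQ_CLOCK_UPDATE:
			puts("update clock");
			break;
		case REQ_NMI:
			puts("inject NMI");
			break;
		default:
			puts("unknown request");	/* BUG() in the kernel */
			break;
		}
	}
}

int main(void)
{
	unsigned long requests = (1UL << REQ_NMI) | (1UL << REQ_TLB_FLUSH);

	if (requests)		/* the if () guard the patch keeps */
		handle_requests(&requests);
	return 0;
}

The if () guard mirrors the patch: in the common case where no requests are
pending, the bit scan is skipped entirely.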