@@ -62,7 +62,7 @@ static void pic_unlock(struct kvm_pic *s)
kvm_for_each_vcpu(i, vcpu, s->kvm) {
if (kvm_apic_accept_pic_intr(vcpu)) {
kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_vcpu_kick(vcpu);
+ kvm_vcpu_kick_boost(vcpu);
return;
}
}
@@ -1309,12 +1309,12 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
result = 1;
vcpu->arch.pv.pv_unhalted = 1;
kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_vcpu_kick(vcpu);
+ kvm_vcpu_kick_boost(vcpu);
break;

case APIC_DM_SMI:
if (!kvm_inject_smi(vcpu)) {
- kvm_vcpu_kick(vcpu);
+ kvm_vcpu_kick_boost(vcpu);
result = 1;
}
break;
@@ -1322,7 +1322,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
case APIC_DM_NMI:
result = 1;
kvm_inject_nmi(vcpu);
- kvm_vcpu_kick(vcpu);
+ kvm_vcpu_kick_boost(vcpu);
break;

case APIC_DM_INIT:
@@ -1901,7 +1901,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
atomic_inc(&apic->lapic_timer.pending);
kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
if (from_timer_fn)
- kvm_vcpu_kick(vcpu);
+ kvm_vcpu_kick_boost(vcpu);
}

static void start_sw_tscdeadline(struct kvm_lapic *apic)
@@ -3566,7 +3566,7 @@ void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
/* Process the interrupt via kvm_check_and_inject_events(). */
kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_vcpu_kick(vcpu);
+ kvm_vcpu_kick_boost(vcpu);
return;
}
@@ -4266,7 +4266,7 @@ static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
if (vmx_deliver_posted_interrupt(vcpu, vector)) {
kvm_lapic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_vcpu_kick(vcpu);
+ kvm_vcpu_kick_boost(vcpu);
} else {
trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
trig_mode, vector);
@@ -2296,11 +2296,19 @@ static inline bool kvm_vcpu_sched_enabled(struct kvm_vcpu *vcpu)
{
return kvm_arch_vcpu_pv_sched_enabled(&vcpu->arch);
}
+
+static inline void kvm_vcpu_kick_boost(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_set_sched(vcpu, true);
+ kvm_vcpu_kick(vcpu);
+}
#else
static inline int kvm_vcpu_set_sched(struct kvm_vcpu *vcpu, bool boost)
{
return 0;
}
+
+#define kvm_vcpu_kick_boost kvm_vcpu_kick
#endif
#endif
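
The boost side of kvm_vcpu_set_sched() is introduced elsewhere in the series and does not appear in this diff. For orientation, here is a minimal sketch of what it could look like, assuming the boost is realized by flipping the vCPU task between SCHED_FIFO and SCHED_NORMAL; the policy choice, priority value, and error handling are illustrative assumptions, not the series' actual code.

/*
 * Sketch only: boost/deboost the task currently backing the vCPU.
 * vcpu->pid is KVM's RCU-protected record of the task that last
 * entered KVM_RUN for this vCPU.
 */
static int kvm_vcpu_set_sched(struct kvm_vcpu *vcpu, bool boost)
{
	struct sched_param param = { .sched_priority = boost ? 1 : 0 };
	int policy = boost ? SCHED_FIFO : SCHED_NORMAL;
	struct task_struct *task;
	int ret;

	rcu_read_lock();
	task = get_pid_task(rcu_dereference(vcpu->pid), PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return -ESRCH;

	ret = sched_setscheduler_nocheck(task, policy, &param);
	put_task_struct(task);
	return ret;
}

Going through vcpu->pid rather than a cached task pointer matters because the userspace thread backing a vCPU can change between KVM_RUN calls.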
@@ -3460,6 +3460,14 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
if (kvm_vcpu_check_block(vcpu) < 0)
break;

+ /*
+ * Boost before scheduling out. A wakeup happens only on an
+ * event or a signal, so it is beneficial for the vCPU to be
+ * scheduled back in as soon as possible. Once the guest
+ * reaches its idle loop, it will request a deboost.
+ */
+ kvm_vcpu_set_sched(vcpu, true);
+
waited = true;
schedule();
}
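
The new comment in kvm_vcpu_block() relies on the guest half of the contract: once the guest reaches its idle loop, it asks the host for a deboost. That interface is outside this diff; a hypothetical sketch, assuming a per-CPU region shared with the host where the guest publishes whether it still wants a boost (pv_sched_state, boost_requested, and pv_sched_idle_enter are illustrative names, not the series' actual ABI).

/* Hypothetical guest-side state, shared with the host. */
struct pv_sched_state {
	bool boost_requested;
};
static DEFINE_PER_CPU(struct pv_sched_state, pv_sched_state);

/*
 * Called on entry to the guest's idle loop: publish that no boost
 * is needed, so the host can move the vCPU task back to normal
 * priority, e.g. via kvm_vcpu_set_sched(vcpu, false).
 */
static void pv_sched_idle_enter(void)
{
	this_cpu_write(pv_sched_state.boost_requested, false);
}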