Message ID | 1648216709-44755-4-git-send-email-wanpengli@tencent.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: X86: Scaling Guest OS Critical Sections with boosting | expand |
On Fri, Mar 25, 2022, Wanpeng Li wrote: > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > index 425fd7f38fa9..6b300496bbd0 100644 > --- a/arch/x86/kvm/x86.c > +++ b/arch/x86/kvm/x86.c > @@ -10375,6 +10375,28 @@ static int vcpu_run(struct kvm_vcpu *vcpu) > return r; > } > > +static int kvm_vcpu_non_preemptable(struct kvm_vcpu *vcpu) s/preemptable/preemptible And I'd recommend inverting the return, and also return a bool, i.e. static bool kvm_vcpu_is_preemptible(struct kvm_vcpu *vcpu) > +{ > + int count; > + > + if (!vcpu->arch.pv_pc.preempt_count_enabled) > + return 0; > + > + if (!kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_pc.preempt_count_cache, > + &count, sizeof(int))) > + return (count & ~PREEMPT_NEED_RESCHED); This cements PREEMPT_NEED_RESCHED into KVM's guest/host ABI. I doubt the sched folks will be happy with that. > + > + return 0; > +} > +
On Wed, 30 Mar 2022 at 08:07, Sean Christopherson <seanjc@google.com> wrote: > > On Fri, Mar 25, 2022, Wanpeng Li wrote: > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > > index 425fd7f38fa9..6b300496bbd0 100644 > > --- a/arch/x86/kvm/x86.c > > +++ b/arch/x86/kvm/x86.c > > @@ -10375,6 +10375,28 @@ static int vcpu_run(struct kvm_vcpu *vcpu) > > return r; > > } > > > > +static int kvm_vcpu_non_preemptable(struct kvm_vcpu *vcpu) > > s/preemptable/preemptible > > And I'd recommend inverting the return, and also return a bool, i.e. > > static bool kvm_vcpu_is_preemptible(struct kvm_vcpu *vcpu) Good suggestion. > > > +{ > > + int count; > > + > > + if (!vcpu->arch.pv_pc.preempt_count_enabled) > > + return 0; > > + > > + if (!kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_pc.preempt_count_cache, > > + &count, sizeof(int))) > > + return (count & ~PREEMPT_NEED_RESCHED); > > This cements PREEMPT_NEED_RESCHED into KVM's guest/host ABI. I doubt the sched > folks will be happy with that. > > > + > > + return 0; > > +} > > +
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 425fd7f38fa9..6b300496bbd0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10375,6 +10375,28 @@ static int vcpu_run(struct kvm_vcpu *vcpu) return r; } +static int kvm_vcpu_non_preemptable(struct kvm_vcpu *vcpu) +{ + int count; + + if (!vcpu->arch.pv_pc.preempt_count_enabled) + return 0; + + if (!kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_pc.preempt_count_cache, + &count, sizeof(int))) + return (count & ~PREEMPT_NEED_RESCHED); + + return 0; +} + +bool kvm_arch_boost_candidate(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.irq_disabled || kvm_vcpu_non_preemptable(vcpu)) + return true; + + return false; +} + static inline int complete_emulated_io(struct kvm_vcpu *vcpu) { int r; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 252ee4a61b58..9f1a7d9540de 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1428,6 +1428,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); int kvm_arch_post_init_vm(struct kvm *kvm); void kvm_arch_pre_destroy_vm(struct kvm *kvm); int kvm_arch_create_vm_debugfs(struct kvm *kvm); +bool kvm_arch_boost_candidate(struct kvm_vcpu *vcpu); #ifndef __KVM_HAVE_ARCH_VM_ALLOC /* diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9581a24c3d17..ee5a788892e0 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3545,6 +3545,11 @@ bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) return false; } +bool __weak kvm_arch_boost_candidate(struct kvm_vcpu *vcpu) +{ + return true; +} + void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) { struct kvm *kvm = me->kvm; @@ -3580,6 +3585,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) !kvm_arch_dy_has_pending_interrupt(vcpu) && !kvm_arch_vcpu_in_kernel(vcpu)) continue; + if (!kvm_arch_boost_candidate(vcpu)) + continue; if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) continue;