| Field | Value |
|---|---|
| Message ID | 20230910082911.3378782-18-guoren@kernel.org (mailing list archive) |
| State | New, archived |
| Headers | show |
| Series | riscv: Add Native/Paravirt qspinlock support (expand) |
On Sun, Sep 10, 2023 at 04:29:11AM -0400, guoren@kernel.org wrote: > From: Guo Ren <guoren@linux.alibaba.com> > > We only need to call the kvm_vcpu_kick() and bring target_vcpu > from the halt state. No irq raised, no other request, just a pure > vcpu_kick. > > Signed-off-by: Guo Ren <guoren@linux.alibaba.com> > Signed-off-by: Guo Ren <guoren@kernel.org> > --- > arch/riscv/kvm/vcpu_sbi_pvlock.c | 19 +++++++++++++++++++ > 1 file changed, 19 insertions(+) > > diff --git a/arch/riscv/kvm/vcpu_sbi_pvlock.c b/arch/riscv/kvm/vcpu_sbi_pvlock.c > index 544a456c5041..914fc58aedfe 100644 > --- a/arch/riscv/kvm/vcpu_sbi_pvlock.c > +++ b/arch/riscv/kvm/vcpu_sbi_pvlock.c > @@ -12,6 +12,24 @@ > #include <asm/sbi.h> > #include <asm/kvm_vcpu_sbi.h> > > +static int kvm_sbi_ext_pvlock_kick_cpu(struct kvm_vcpu *vcpu) > +{ > + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; > + struct kvm *kvm = vcpu->kvm; > + struct kvm_vcpu *target; > + > + target = kvm_get_vcpu_by_id(kvm, cp->a0); > + if (!target) > + return SBI_ERR_INVALID_PARAM; > + > + kvm_vcpu_kick(target); > + > + if (READ_ONCE(target->ready)) > + kvm_vcpu_yield_to(target); > + > + return SBI_SUCCESS; > +} > + > static int kvm_sbi_ext_pvlock_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, > struct kvm_vcpu_sbi_return *retdata) > { > @@ -21,6 +39,7 @@ static int kvm_sbi_ext_pvlock_handler(struct kvm_vcpu *vcpu, struct kvm_run *run > > switch (funcid) { > case SBI_EXT_PVLOCK_KICK_CPU: > + ret = kvm_sbi_ext_pvlock_kick_cpu(vcpu); > break; > default: > ret = SBI_ERR_NOT_SUPPORTED; > -- > 2.36.1 > LGTM: Reviewed-by: Leonardo Bras <leobras@redhat.com> Thanks! Leo
diff --git a/arch/riscv/kvm/vcpu_sbi_pvlock.c b/arch/riscv/kvm/vcpu_sbi_pvlock.c index 544a456c5041..914fc58aedfe 100644 --- a/arch/riscv/kvm/vcpu_sbi_pvlock.c +++ b/arch/riscv/kvm/vcpu_sbi_pvlock.c @@ -12,6 +12,24 @@ #include <asm/sbi.h> #include <asm/kvm_vcpu_sbi.h> +static int kvm_sbi_ext_pvlock_kick_cpu(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *target; + + target = kvm_get_vcpu_by_id(kvm, cp->a0); + if (!target) + return SBI_ERR_INVALID_PARAM; + + kvm_vcpu_kick(target); + + if (READ_ONCE(target->ready)) + kvm_vcpu_yield_to(target); + + return SBI_SUCCESS; +} + static int kvm_sbi_ext_pvlock_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_vcpu_sbi_return *retdata) { @@ -21,6 +39,7 @@ static int kvm_sbi_ext_pvlock_handler(struct kvm_vcpu *vcpu, struct kvm_run *run switch (funcid) { case SBI_EXT_PVLOCK_KICK_CPU: + ret = kvm_sbi_ext_pvlock_kick_cpu(vcpu); break; default: ret = SBI_ERR_NOT_SUPPORTED;