Message ID: 20211118083912.981995-3-atishp@rivosinc.com (mailing list archive)
State:      New, archived
Series:     Add SBI v0.2 support for KVM
On Thu, Nov 18, 2021 at 2:10 PM Atish Patra <atishp@rivosinc.com> wrote:
>
> From: Atish Patra <atish.patra@wdc.com>
>
> With SBI v0.2, there may be more SBI extensions in the future. It makes
> more sense to group related extensions in separate files. The guest
> kernel will choose the appropriate SBI version dynamically.
>
> Move the existing implementation to a separate file so that it can be
> removed in the future without much conflict.
>
> Reviewed-by: Anup Patel <anup.patel@wdc.com>
> Signed-off-by: Atish Patra <atish.patra@wdc.com>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>

I have queued this for 5.17

Thanks,
Anup

> ---
>  arch/riscv/include/asm/kvm_vcpu_sbi.h |   2 +
>  arch/riscv/kvm/Makefile               |   1 +
>  arch/riscv/kvm/vcpu_sbi.c             | 148 +++-----------------------
>  arch/riscv/kvm/vcpu_sbi_v01.c         | 126 ++++++++++++++++++++++
>  4 files changed, 146 insertions(+), 131 deletions(-)
>  create mode 100644 arch/riscv/kvm/vcpu_sbi_v01.c
>
> [...]
>
> --
> kvm-riscv mailing list
> kvm-riscv@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/kvm-riscv
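The dispatch model this refactor settles on is compact enough to show in miniature: each extension declares a contiguous [extid_start, extid_end] range of SBI extension IDs plus a handler, and the core walks a static table until a range matches (see the full diff below). The following is a minimal, self-contained sketch of that lookup pattern in plain C; the names (sbi_ext_desc, find_ext, v01_handler) are illustrative, not the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for struct kvm_vcpu_sbi_extension: an extension
 * owns a contiguous range of SBI extension IDs and one handler. */
struct sbi_ext_desc {
        unsigned long extid_start;
        unsigned long extid_end;
        int (*handler)(unsigned long extid);
};

static int v01_handler(unsigned long extid)
{
        printf("legacy SBI call 0x%lx dispatched\n", extid);
        return 0;
}

/* Mirrors the sbi_ext[] table added by the patch; the SBI v0.1 legacy
 * calls span extension IDs 0x0 (SET_TIMER) through 0x8 (SHUTDOWN). */
static const struct sbi_ext_desc sbi_exts[] = {
        { 0x0, 0x8, v01_handler },
};

static const struct sbi_ext_desc *find_ext(unsigned long extid)
{
        size_t i;

        for (i = 0; i < sizeof(sbi_exts) / sizeof(sbi_exts[0]); i++) {
                if (extid >= sbi_exts[i].extid_start &&
                    extid <= sbi_exts[i].extid_end)
                        return &sbi_exts[i];
        }
        return NULL;    /* caller reports "not supported" to the guest */
}

int main(void)
{
        const struct sbi_ext_desc *ext = find_ext(0x8); /* SHUTDOWN */

        if (ext && ext->handler)
                return ext->handler(0x8);
        return 1;
}

Range matching is sufficient here precisely because the v0.1 legacy calls occupy one contiguous ID block, which is also why kvm_riscv_vcpu_sbi_ecall() can detect a v0.1 call with a single start/end comparison.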
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index 1a4cb0db2d0b..704151969ceb 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -25,5 +25,7 @@ struct kvm_vcpu_sbi_extension {
                         bool *exit);
 };
 
+void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+
 #endif /* __RISCV_KVM_VCPU_SBI_H__ */
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 30cdd1df0098..d3d5ff3a6019 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -23,4 +23,5 @@ kvm-y += vcpu_exit.o
 kvm-y += vcpu_fp.o
 kvm-y += vcpu_switch.o
 kvm-y += vcpu_sbi.o
+kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
 kvm-y += vcpu_timer.o
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 32376906ff20..a8e0191cd9fc 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -9,9 +9,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
-#include <asm/csr.h>
 #include <asm/sbi.h>
-#include <asm/kvm_vcpu_timer.h>
 #include <asm/kvm_vcpu_sbi.h>
 
 static int kvm_linux_err_map_sbi(int err)
@@ -32,8 +30,21 @@ static int kvm_linux_err_map_sbi(int err)
        };
 }
 
-static void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu,
-                                      struct kvm_run *run)
+#ifdef CONFIG_RISCV_SBI_V01
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
+#else
+static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
+        .extid_start = -1UL,
+        .extid_end = -1UL,
+        .handler = NULL,
+};
+#endif
+
+static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
+        &vcpu_sbi_ext_v01,
+};
+
+void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
         struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
 
@@ -71,123 +82,6 @@ int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
         return 0;
 }
 
-#ifdef CONFIG_RISCV_SBI_V01
-
-static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
-                                    struct kvm_run *run, u32 type)
-{
-        int i;
-        struct kvm_vcpu *tmp;
-
-        kvm_for_each_vcpu(i, tmp, vcpu->kvm)
-                tmp->arch.power_off = true;
-        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
-
-        memset(&run->system_event, 0, sizeof(run->system_event));
-        run->system_event.type = type;
-        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
-}
-
-static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                                   unsigned long *out_val,
-                                   struct kvm_cpu_trap *utrap,
-                                   bool *exit)
-{
-        ulong hmask;
-        int i, ret = 0;
-        u64 next_cycle;
-        struct kvm_vcpu *rvcpu;
-        struct cpumask cm, hm;
-        struct kvm *kvm = vcpu->kvm;
-        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
-
-        switch (cp->a7) {
-        case SBI_EXT_0_1_CONSOLE_GETCHAR:
-        case SBI_EXT_0_1_CONSOLE_PUTCHAR:
-                /*
-                 * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
-                 * handled in kernel so we forward these to user-space
-                 */
-                kvm_riscv_vcpu_sbi_forward(vcpu, run);
-                *exit = true;
-                break;
-        case SBI_EXT_0_1_SET_TIMER:
-#if __riscv_xlen == 32
-                next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
-#else
-                next_cycle = (u64)cp->a0;
-#endif
-                ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
-                break;
-        case SBI_EXT_0_1_CLEAR_IPI:
-                ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
-                break;
-        case SBI_EXT_0_1_SEND_IPI:
-                if (cp->a0)
-                        hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
-                                                           utrap);
-                else
-                        hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
-                if (utrap->scause)
-                        break;
-
-                for_each_set_bit(i, &hmask, BITS_PER_LONG) {
-                        rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
-                        ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
-                        if (ret < 0)
-                                break;
-                }
-                break;
-        case SBI_EXT_0_1_SHUTDOWN:
-                kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
-                *exit = true;
-                break;
-        case SBI_EXT_0_1_REMOTE_FENCE_I:
-        case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
-        case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
-                if (cp->a0)
-                        hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
-                                                           utrap);
-                else
-                        hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
-                if (utrap->scause)
-                        break;
-
-                cpumask_clear(&cm);
-                for_each_set_bit(i, &hmask, BITS_PER_LONG) {
-                        rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
-                        if (rvcpu->cpu < 0)
-                                continue;
-                        cpumask_set_cpu(rvcpu->cpu, &cm);
-                }
-                riscv_cpuid_to_hartid_mask(&cm, &hm);
-                if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
-                        ret = sbi_remote_fence_i(cpumask_bits(&hm));
-                else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
-                        ret = sbi_remote_hfence_vvma(cpumask_bits(&hm),
-                                                     cp->a1, cp->a2);
-                else
-                        ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm),
-                                                          cp->a1, cp->a2, cp->a3);
-                break;
-        default:
-                ret = -EINVAL;
-                break;
-        }
-
-        return ret;
-}
-
-const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
-        .extid_start = SBI_EXT_0_1_SET_TIMER,
-        .extid_end = SBI_EXT_0_1_SHUTDOWN,
-        .handler = kvm_sbi_ext_v01_handler,
-};
-
-static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
-        &vcpu_sbi_ext_v01,
-};
-
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid)
 {
         int i = 0;
@@ -214,9 +108,11 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
         sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7);
         if (sbi_ext && sbi_ext->handler) {
+#ifdef CONFIG_RISCV_SBI_V01
                 if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                     cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                         ext_is_v01 = true;
+#endif
                 ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit);
         } else {
                 /* Return error for unsupported SBI calls */
@@ -256,13 +152,3 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
         return ret;
 }
-
-#else
-
-int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-        kvm_riscv_vcpu_sbi_forward(vcpu, run);
-        return 0;
-}
-
-#endif
diff --git a/arch/riscv/kvm/vcpu_sbi_v01.c b/arch/riscv/kvm/vcpu_sbi_v01.c
new file mode 100644
index 000000000000..08097d1c13c1
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi_v01.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *      Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_sbi.h>
+
+static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
+                                    struct kvm_run *run, u32 type)
+{
+        int i;
+        struct kvm_vcpu *tmp;
+
+        kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+                tmp->arch.power_off = true;
+        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+
+        memset(&run->system_event, 0, sizeof(run->system_event));
+        run->system_event.type = type;
+        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                                   unsigned long *out_val,
+                                   struct kvm_cpu_trap *utrap,
+                                   bool *exit)
+{
+        ulong hmask;
+        int i, ret = 0;
+        u64 next_cycle;
+        struct kvm_vcpu *rvcpu;
+        struct cpumask cm, hm;
+        struct kvm *kvm = vcpu->kvm;
+        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+
+        switch (cp->a7) {
+        case SBI_EXT_0_1_CONSOLE_GETCHAR:
+        case SBI_EXT_0_1_CONSOLE_PUTCHAR:
+                /*
+                 * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
+                 * handled in kernel so we forward these to user-space
+                 */
+                kvm_riscv_vcpu_sbi_forward(vcpu, run);
+                *exit = true;
+                break;
+        case SBI_EXT_0_1_SET_TIMER:
+#if __riscv_xlen == 32
+                next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
+#else
+                next_cycle = (u64)cp->a0;
+#endif
+                ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+                break;
+        case SBI_EXT_0_1_CLEAR_IPI:
+                ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
+                break;
+        case SBI_EXT_0_1_SEND_IPI:
+                if (cp->a0)
+                        hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
+                                                           utrap);
+                else
+                        hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
+                if (utrap->scause)
+                        break;
+
+                for_each_set_bit(i, &hmask, BITS_PER_LONG) {
+                        rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
+                        ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
+                        if (ret < 0)
+                                break;
+                }
+                break;
+        case SBI_EXT_0_1_SHUTDOWN:
+                kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
+                *exit = true;
+                break;
+        case SBI_EXT_0_1_REMOTE_FENCE_I:
+        case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
+        case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
+                if (cp->a0)
+                        hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
+                                                           utrap);
+                else
+                        hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
+                if (utrap->scause)
+                        break;
+
+                cpumask_clear(&cm);
+                for_each_set_bit(i, &hmask, BITS_PER_LONG) {
+                        rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
+                        if (rvcpu->cpu < 0)
+                                continue;
+                        cpumask_set_cpu(rvcpu->cpu, &cm);
+                }
+                riscv_cpuid_to_hartid_mask(&cm, &hm);
+                if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
+                        ret = sbi_remote_fence_i(cpumask_bits(&hm));
+                else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
+                        ret = sbi_remote_hfence_vvma(cpumask_bits(&hm),
+                                                     cp->a1, cp->a2);
+                else
+                        ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm),
+                                                          cp->a1, cp->a2, cp->a3);
+                break;
+        default:
+                ret = -EINVAL;
+                break;
+        };
+
+        return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
+        .extid_start = SBI_EXT_0_1_SET_TIMER,
+        .extid_end = SBI_EXT_0_1_SHUTDOWN,
+        .handler = kvm_sbi_ext_v01_handler,
+};
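One detail of the vcpu_sbi.c change deserves a note: the #ifdef CONFIG_RISCV_SBI_V01 block swaps the extern descriptor for a local stub whose -1UL..-1UL ID range can never match a real extension ID, so sbi_ext[] and the lookup path build the same whether or not v0.1 support is compiled in; only the ext_is_v01 range check keeps an #ifdef. Below is a reduced sketch of that linkage pattern, using hypothetical names (CONFIG_FEATURE_V01, ext_desc) rather than the kernel's:

#include <stdio.h>

struct ext_desc {
        unsigned long extid_start;
        unsigned long extid_end;
        int (*handler)(void);
};

#ifdef CONFIG_FEATURE_V01
/* When the option is set, the real descriptor comes from its own
 * translation unit (vcpu_sbi_v01.c in the patch above). */
extern const struct ext_desc ext_v01;
#else
/* Compiled-out stub: the -1UL..-1UL range matches no real extension ID,
 * so table walks fall through and callers need no #ifdefs of their own. */
static const struct ext_desc ext_v01 = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

static const struct ext_desc *exts[] = { &ext_v01 };

int main(void)
{
        /* Built without -DCONFIG_FEATURE_V01 this prints the sentinel
         * range; with it, linking also needs the real descriptor's TU. */
        printf("v01 extid range: [0x%lx, 0x%lx]\n",
               exts[0]->extid_start, exts[0]->extid_end);
        return 0;
}

The payoff of this structure is that adding another extension, as the rest of this series does for SBI v0.2, amounts to appending one descriptor to the table and one object file to the Makefile.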