Message ID | 20231214101552.100721-24-ajones@ventanamicro.com (mailing list archive)
---|---
State | Superseded |
Series | RISC-V: Add steal-time support
On Thu, Dec 14, 2023 at 3:46 PM Andrew Jones <ajones@ventanamicro.com> wrote:
>
> Add a select SCHED_INFO to the KVM config in order to get run_delay
> info. Then implement SBI STA's set-steal-time-shmem function and
> kvm_riscv_vcpu_record_steal_time() to provide the steal-time info
> to guests.
>
> Signed-off-by: Andrew Jones <ajones@ventanamicro.com>

LGTM.

Reviewed-by: Anup Patel <anup@brainfault.org>

Regards,
Anup

> ---
>  arch/riscv/kvm/Kconfig        |  1 +
>  arch/riscv/kvm/vcpu_sbi_sta.c | 96 ++++++++++++++++++++++++++++++++++-
>  2 files changed, 95 insertions(+), 2 deletions(-)
>
> diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig
> index dfc237d7875b..148e52b516cf 100644
> --- a/arch/riscv/kvm/Kconfig
> +++ b/arch/riscv/kvm/Kconfig
> @@ -32,6 +32,7 @@ config KVM
>  	select KVM_XFER_TO_GUEST_WORK
>  	select MMU_NOTIFIER
>  	select PREEMPT_NOTIFIERS
> +	select SCHED_INFO
>  	help
>  	  Support hosting virtualized guest machines.
>
> diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
> index 073bc47013b7..8b8dbee5500a 100644
> --- a/arch/riscv/kvm/vcpu_sbi_sta.c
> +++ b/arch/riscv/kvm/vcpu_sbi_sta.c
> @@ -6,21 +6,113 @@
>  #include <linux/kconfig.h>
>  #include <linux/kernel.h>
>  #include <linux/kvm_host.h>
> +#include <linux/mm.h>
> +#include <linux/sizes.h>
>
> +#include <asm/bug.h>
> +#include <asm/current.h>
>  #include <asm/kvm_vcpu_sbi.h>
> +#include <asm/page.h>
>  #include <asm/sbi.h>
> +#include <asm/uaccess.h>
>
>  void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
>  {
>  	gpa_t shmem = vcpu->arch.sta.shmem;
> +	u64 last_steal = vcpu->arch.sta.last_steal;
> +	u32 *sequence_ptr, sequence;
> +	u64 *steal_ptr, steal;
> +	unsigned long hva;
> +	gfn_t gfn;
>
>  	if (shmem == INVALID_GPA)
>  		return;
> +
> +	/*
> +	 * shmem is 64-byte aligned (see the enforcement in
> +	 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
> +	 * is 64 bytes, so we know all its offsets are in the same page.
> +	 */
> +	gfn = shmem >> PAGE_SHIFT;
> +	hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
> +
> +	if (WARN_ON(kvm_is_error_hva(hva))) {
> +		vcpu->arch.sta.shmem = INVALID_GPA;
> +		return;
> +	}
> +
> +	sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
> +			       offsetof(struct sbi_sta_struct, sequence));
> +	steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
> +			    offsetof(struct sbi_sta_struct, steal));
> +
> +	if (WARN_ON(get_user(sequence, sequence_ptr)))
> +		return;
> +
> +	sequence = le32_to_cpu(sequence);
> +	sequence += 1;
> +
> +	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
> +		return;
> +
> +	if (!WARN_ON(get_user(steal, steal_ptr))) {
> +		steal = le64_to_cpu(steal);
> +		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
> +		steal += vcpu->arch.sta.last_steal - last_steal;
> +		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
> +	}
> +
> +	sequence += 1;
> +	WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));
> +
> +	kvm_vcpu_mark_page_dirty(vcpu, gfn);
>  }
>
>  static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
>  {
> -	return SBI_ERR_FAILURE;
> +	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
> +	unsigned long shmem_phys_lo = cp->a0;
> +	unsigned long shmem_phys_hi = cp->a1;
> +	u32 flags = cp->a2;
> +	struct sbi_sta_struct zero_sta = {0};
> +	unsigned long hva;
> +	bool writable;
> +	gpa_t shmem;
> +	int ret;
> +
> +	if (flags != 0)
> +		return SBI_ERR_INVALID_PARAM;
> +
> +	if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
> +	    shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
> +		vcpu->arch.sta.shmem = INVALID_GPA;
> +		return 0;
> +	}
> +
> +	if (shmem_phys_lo & (SZ_64 - 1))
> +		return SBI_ERR_INVALID_PARAM;
> +
> +	shmem = shmem_phys_lo;
> +
> +	if (shmem_phys_hi != 0) {
> +		if (IS_ENABLED(CONFIG_32BIT))
> +			shmem |= ((gpa_t)shmem_phys_hi << 32);
> +		else
> +			return SBI_ERR_INVALID_ADDRESS;
> +	}
> +
> +	hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
> +	if (kvm_is_error_hva(hva) || !writable)
> +		return SBI_ERR_INVALID_ADDRESS;
> +
> +	ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
> +	if (ret)
> +		return SBI_ERR_FAILURE;
> +
> +	vcpu->arch.sta.shmem = shmem;
> +	vcpu->arch.sta.last_steal = current->sched_info.run_delay;
> +
> +	return 0;
>  }
>
>  static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
> @@ -46,7 +138,7 @@ static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
>
>  static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
>  {
> -	return 0;
> +	return !!sched_info_on();
>  }
>
>  const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
> --
> 2.43.0
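The shared record written by `kvm_riscv_vcpu_record_steal_time()` is the 64-byte `sbi_sta_struct` defined earlier in this series in `arch/riscv/include/asm/sbi.h`; its 64-byte size, combined with the 64-byte alignment enforced at registration, is what guarantees that every field offset used above lands in the same page. For reference, a sketch of the layout as specified by the SBI STA extension (approximate, the header in the series is authoritative):

```c
#include <linux/types.h>

/*
 * Approximate layout of the shared steal-time record, per the SBI STA
 * extension. All fields are little-endian; the authoritative kernel
 * definition lives in arch/riscv/include/asm/sbi.h.
 */
struct sbi_sta_struct {
	__le32 sequence;	/* odd while the hypervisor is mid-update */
	__le32 flags;		/* must currently be zero */
	__le64 steal;		/* accumulated stolen time, in nanoseconds */
	u8 preempted;		/* vCPU preemption hint */
	u8 pad[47];		/* pads the record to exactly 64 bytes */
} __packed;
```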
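For context on the guest half of this interface: a guest enables steal-time reporting by passing a 64-byte-aligned guest-physical address to the set-steal-time-shmem call implemented above, and disables it again by passing `SBI_STA_SHMEM_DISABLE` in both address arguments. Below is a minimal guest-side sketch assuming Linux's `sbi_ecall()` helper and the `SBI_EXT_STA_*` constants from this series; the function name `sta_set_shmem()` is hypothetical:

```c
#include <linux/kernel.h>
#include <asm/sbi.h>

/* Hypothetical guest-side registration of the steal-time shmem. */
static int sta_set_shmem(phys_addr_t pa)
{
	/*
	 * On RV64 the whole address goes in a0 and a1 must be zero,
	 * matching the checks in kvm_sbi_sta_steal_time_set_shmem();
	 * on RV32 the upper bits go in a1.
	 */
	unsigned long lo = (unsigned long)pa;
	unsigned long hi = IS_ENABLED(CONFIG_32BIT) ? upper_32_bits((u64)pa) : 0;
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM,
			lo, hi, 0 /* flags */, 0, 0, 0);

	return ret.error ? sbi_err_map_linux_errno(ret.error) : 0;
}
```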
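The two `sequence` increments in `kvm_riscv_vcpu_record_steal_time()` bracket the `steal` update, so the counter is odd exactly while the hypervisor is mid-update. A guest can therefore loop until it observes a stable, even sequence, in the usual seqcount style. A sketch of such a reader (names are illustrative, and real code would insert read barriers between the loads):

```c
#include <asm/sbi.h>

/* Illustrative seqcount-style reader of the shared steal-time record. */
static u64 sta_read_steal(struct sbi_sta_struct *st)
{
	__le32 seq;
	u64 steal;

	do {
		seq = READ_ONCE(st->sequence);
		/* a read barrier belongs here */
		steal = le64_to_cpu(READ_ONCE(st->steal));
		/* ...and here, before rechecking the sequence */
	} while ((le32_to_cpu(seq) & 1) ||
		 READ_ONCE(st->sequence) != seq);

	return steal;
}
```

On the host side, the value accumulated into `steal` is simply the delta in `current->sched_info.run_delay` since the previous update, which is why the Kconfig change selects `SCHED_INFO`.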