@@ -3499,7 +3499,7 @@ VCPU matching underlying host.
---------------------
:Capability: basic
-:Architectures: arm64, mips
+:Architectures: arm64, mips, riscv
:Type: vcpu ioctl
:Parameters: struct kvm_reg_list (in/out)
:Returns: 0 on success; -1 on error
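
Userspace drives this ioctl with the usual two-call pattern: call once with n = 0 to learn how many registers the vCPU exposes (the call fails with E2BIG but writes the count back), then allocate a large enough buffer and call again to fetch the IDs. A minimal sketch, not part of the patch, assuming vcpu_fd is a vCPU file descriptor obtained via KVM_CREATE_VCPU; the helper name get_reg_list() is purely illustrative:

#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

/* Illustrative helper: fetch the full register list for one vCPU. */
static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
        struct kvm_reg_list probe, *list;

        /*
         * First call: n = 0 is too small, so the ioctl fails with E2BIG,
         * but KVM has already written the required count into probe.n.
         */
        memset(&probe, 0, sizeof(probe));
        ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

        list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
        if (!list)
                return NULL;
        list->n = probe.n;

        /* Second call: the buffer is large enough, KVM fills list->reg[]. */
        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list)) {
                free(list);
                return NULL;
        }
        return list;
}
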
@@ -657,6 +657,366 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
+                                   u64 __user *uindices)
+{
+        unsigned int i;
+        int n = 0;
+
+        for (i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
+             i++) {
+                u64 size;
+                u64 reg;
+
+                /*
+                 * Avoid reporting config reg if the corresponding extension
+                 * was not available.
+                 */
+                if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
+                    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+                        continue;
+                else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
+                         !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+                        continue;
+
+                size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;
+
+                if (uindices) {
+                        if (put_user(reg, uindices))
+                                return -EFAULT;
+                        uindices++;
+                }
+
+                n++;
+        }
+
+        return n;
+}
+
+static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
+{
+        return copy_config_reg_indices(vcpu, NULL);
+}
+
+static inline unsigned long num_core_regs(void)
+{
+        return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
+}
+
+static int copy_core_reg_indices(u64 __user *uindices)
+{
+        unsigned int i;
+        int n = num_core_regs();
+
+        for (i = 0; i < n; i++) {
+                u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;
+
+                if (uindices) {
+                        if (put_user(reg, uindices))
+                                return -EFAULT;
+                        uindices++;
+                }
+        }
+
+        return n;
+}
+
+static inline unsigned long num_csr_regs(void)
+{
+        unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+
+        if (kvm_riscv_aia_available())
+                n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+        return n;
+}
+
+static int copy_csr_reg_indices(u64 __user *uindices)
+{
+        unsigned int i;
+        int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+        int n2 = 0;
+
+        /* copy general csr regs */
+        for (i = 0; i < n1; i++) {
+                u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+                          KVM_REG_RISCV_CSR_GENERAL | i;
+
+                if (uindices) {
+                        if (put_user(reg, uindices))
+                                return -EFAULT;
+                        uindices++;
+                }
+        }
+
+        /* copy AIA csr regs */
+        if (kvm_riscv_aia_available()) {
+                n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+                for (i = 0; i < n2; i++) {
+                        u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                        u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+                                  KVM_REG_RISCV_CSR_AIA | i;
+
+                        if (uindices) {
+                                if (put_user(reg, uindices))
+                                        return -EFAULT;
+                                uindices++;
+                        }
+                }
+        }
+
+        return n1 + n2;
+}
+
+static inline unsigned long num_timer_regs(void)
+{
+        return sizeof(struct kvm_riscv_timer) / sizeof(u64);
+}
+
+static int copy_timer_reg_indices(u64 __user *uindices)
+{
+        unsigned int i;
+        int n = num_timer_regs();
+
+        for (i = 0; i < n; i++) {
+                u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | i;
+
+                if (uindices) {
+                        if (put_user(reg, uindices))
+                                return -EFAULT;
+                        uindices++;
+                }
+        }
+
+        return n;
+}
+
+static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
+{
+        const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+        if (riscv_isa_extension_available(vcpu->arch.isa, f))
+                return sizeof(cntx->fp.f) / sizeof(u32);
+        else
+                return 0;
+}
+
+static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
+                                 u64 __user *uindices)
+{
+        unsigned int i;
+        int n = num_fp_f_regs(vcpu);
+
+        for (i = 0; i < n; i++) {
+                u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | i;
+
+                if (uindices) {
+                        if (put_user(reg, uindices))
+                                return -EFAULT;
+                        uindices++;
+                }
+        }
+
+        return n;
+}
+
+static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
+{
+        const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+        if (riscv_isa_extension_available(vcpu->arch.isa, d))
+                return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
+        else
+                return 0;
+}
+
+static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
+                                 u64 __user *uindices)
+{
+        int i;
+        int n = num_fp_d_regs(vcpu);
+        u64 reg;
+
+        /* copy fp.d.f indices */
+        for (i = 0; i < n-1; i++) {
+                reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | i;
+
+                if (uindices) {
+                        if (put_user(reg, uindices))
+                                return -EFAULT;
+                        uindices++;
+                }
+        }
+
+        /* copy fp.d.fcsr indices */
+        reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
+        if (uindices) {
+                if (put_user(reg, uindices))
+                        return -EFAULT;
+        }
+
+        return n;
+}
+
+static int copy_isa_ext_reg_indices(u64 __user *uindices)
+{
+        unsigned int i, n = 0;
+        unsigned long host_isa_ext;
+
+        for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+                u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
+
+                host_isa_ext = kvm_isa_ext_arr[i];
+                if (!__riscv_isa_extension_available(NULL, host_isa_ext))
+                        continue;
+
+                if (uindices) {
+                        if (put_user(reg, uindices))
+                                return -EFAULT;
+                        uindices++;
+                }
+
+                n++;
+        }
+
+        return n;
+}
+
+static inline unsigned long num_isa_ext_regs(void)
+{
+        return copy_isa_ext_reg_indices(NULL);
+}
+
+static inline unsigned long num_sbi_ext_regs(void)
+{
+        /*
+         * number of KVM_REG_RISCV_SBI_SINGLE +
+         * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
+         */
+        return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
+}
+
+static int copy_sbi_ext_reg_indices(u64 __user *uindices)
+{
+        unsigned int i;
+        int n;
+
+        /* copy KVM_REG_RISCV_SBI_SINGLE */
+        n = KVM_RISCV_SBI_EXT_MAX;
+        for (i = 0; i < n; i++) {
+                u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+                          KVM_REG_RISCV_SBI_SINGLE | i;
+
+                if (uindices) {
+                        if (put_user(reg, uindices))
+                                return -EFAULT;
+                        uindices++;
+                }
+        }
+
+        /* copy KVM_REG_RISCV_SBI_MULTI */
+        n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
+        for (i = 0; i < n; i++) {
+                u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+                          KVM_REG_RISCV_SBI_MULTI_EN | i;
+
+                if (uindices) {
+                        if (put_user(reg, uindices))
+                                return -EFAULT;
+                        uindices++;
+                }
+
+                reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+                      KVM_REG_RISCV_SBI_MULTI_DIS | i;
+
+                if (uindices) {
+                        if (put_user(reg, uindices))
+                                return -EFAULT;
+                        uindices++;
+                }
+        }
+
+        return num_sbi_ext_regs();
+}
+
+/*
+ * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
+ *
+ * This is for all registers.
+ */
+static unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
+{
+        unsigned long res = 0;
+
+        res += num_config_regs(vcpu);
+        res += num_core_regs();
+        res += num_csr_regs();
+        res += num_timer_regs();
+        res += num_fp_f_regs(vcpu);
+        res += num_fp_d_regs(vcpu);
+        res += num_isa_ext_regs();
+        res += num_sbi_ext_regs();
+
+        return res;
+}
+
+/*
+ * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
+ */
+static int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+                                           u64 __user *uindices)
+{
+        int ret;
+
+        ret = copy_config_reg_indices(vcpu, uindices);
+        if (ret < 0)
+                return ret;
+        uindices += ret;
+
+        ret = copy_core_reg_indices(uindices);
+        if (ret < 0)
+                return ret;
+        uindices += ret;
+
+        ret = copy_csr_reg_indices(uindices);
+        if (ret < 0)
+                return ret;
+        uindices += ret;
+
+        ret = copy_timer_reg_indices(uindices);
+        if (ret < 0)
+                return ret;
+        uindices += ret;
+
+        ret = copy_fp_f_reg_indices(vcpu, uindices);
+        if (ret < 0)
+                return ret;
+        uindices += ret;
+
+        ret = copy_fp_d_reg_indices(vcpu, uindices);
+        if (ret < 0)
+                return ret;
+        uindices += ret;
+
+        ret = copy_isa_ext_reg_indices(uindices);
+        if (ret < 0)
+                return ret;
+        uindices += ret;
+
+        ret = copy_sbi_ext_reg_indices(uindices);
+        if (ret < 0)
+                return ret;
+
+        return 0;
+}
+
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
@@ -758,6 +1118,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
 		break;
 	}
+	case KVM_GET_REG_LIST: {
+		struct kvm_reg_list __user *user_list = argp;
+		struct kvm_reg_list reg_list;
+		unsigned int n;
+
+		r = -EFAULT;
+		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+			break;
+		n = reg_list.n;
+		reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
+		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+			break;
+		r = -E2BIG;
+		if (n < reg_list.n)
+			break;
+		r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
+		break;
+	}
 	default:
 		break;
 	}
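
Each returned ID is already a complete KVM_GET/SET_ONE_REG identifier (KVM_REG_RISCV | size | type | index), so it can be passed straight to KVM_GET_ONE_REG. A follow-on sketch, again not part of the patch, reusing the illustrative get_reg_list() result from above:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative: read back every register reported by KVM_GET_REG_LIST. */
static int read_all_regs(int vcpu_fd, const struct kvm_reg_list *list)
{
        __u64 i;

        for (i = 0; i < list->n; i++) {
                __u64 val = 0;
                struct kvm_one_reg one = {
                        .id   = list->reg[i],
                        .addr = (__u64)(unsigned long)&val,
                };

                /*
                 * KVM_REG_SIZE_U32 registers fill 4 bytes, U64 registers 8;
                 * a u64 buffer is large enough for either.
                 */
                if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one))
                        return -1;
        }
        return 0;
}
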