[1/2] riscv: kvm: Add KVM_GET_REG_LIST API support

Message ID 921fc2e1a91887170e277acb1b52df57480a5736.1683791148.git.haibo1.xu@intel.com (mailing list archive)
State Superseded
Series RISCV: Add KVM_GET_REG_LIST API

Checks

Context Check Description
conchuod/cover_letter success Series has a cover letter
conchuod/tree_selection success Guessed tree name to be for-next at HEAD ac9a78681b92
conchuod/fixes_present success Fixes tag not required for -next series
conchuod/maintainers_pattern success MAINTAINERS pattern errors before the patch: 6 and now 6
conchuod/verify_signedoff success Signed-off-by tag matches author and committer
conchuod/kdoc fail Errors and warnings before: 0 this patch: 3
conchuod/build_rv64_clang_allmodconfig fail Errors and warnings before: 14 this patch: 17
conchuod/module_param success Was 0 now: 0
conchuod/build_rv64_gcc_allmodconfig fail Errors and warnings before: 31 this patch: 34
conchuod/build_rv32_defconfig success Build OK
conchuod/dtb_warn_rv64 success Errors and warnings before: 3 this patch: 3
conchuod/header_inline success No static functions without inline keyword in header files
conchuod/checkpatch warning CHECK: Alignment should match open parenthesis; CHECK: spaces preferred around that '*' (ctx:VxV); CHECK: spaces preferred around that '+' (ctx:VxV); CHECK: spaces preferred around that '-' (ctx:VxV)
conchuod/build_rv64_nommu_k210_defconfig success Build OK
conchuod/verify_fixes success No Fixes tag
conchuod/build_rv64_nommu_virt_defconfig success Build OK

Commit Message

Xu, Haibo1 May 11, 2023, 9:22 a.m. UTC
KVM_GET_REG_LIST API will return all registers that are available to
KVM_GET/SET_ONE_REG APIs. It is very useful for identifying platform
regression issues during VM migration.

Since this API is already supported on arm64, it is straightforward
to enable it on riscv with a similar code structure.

Signed-off-by: Haibo Xu <haibo1.xu@intel.com>
---
 Documentation/virt/kvm/api.rst |   2 +-
 arch/riscv/kvm/vcpu.c          | 346 +++++++++++++++++++++++++++++++++
 2 files changed, 347 insertions(+), 1 deletion(-)
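
For context, KVM_GET_REG_LIST is a two-call handshake: userspace first calls
with n = 0, the kernel writes back the real register count (and the ioctl
fails with E2BIG), then a second call with a large-enough buffer fetches the
IDs. A minimal userspace sketch of that pattern (not from this series; error
handling is trimmed and the vCPU fd is assumed to exist):

  #include <linux/kvm.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>

  /* Query the count, then fetch every KVM_GET/SET_ONE_REG index. */
  static struct kvm_reg_list *vcpu_get_reg_list(int vcpu_fd)
  {
          struct kvm_reg_list probe = { .n = 0 };
          struct kvm_reg_list *list;

          /* Expected to fail with E2BIG, filling in the real count. */
          ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

          list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
          if (!list)
                  return NULL;

          list->n = probe.n;
          if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
                  free(list);
                  return NULL;
          }

          return list; /* list->reg[0..n-1] holds the register IDs */
  }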

Comments

Andrew Jones May 11, 2023, 10:44 a.m. UTC | #1
On Thu, May 11, 2023 at 05:22:48PM +0800, Haibo Xu wrote:
> KVM_GET_REG_LIST API will return all registers that are available to
> KVM_GET/SET_ONE_REG APIs. It's very useful to identify some platform
> regression issue during VM migration.
> 
> Since this API was already supported on arm64, it'd be straightforward

s/it'd be/it is/

> to enable it on riscv with similar code structure.
> 
> Signed-off-by: Haibo Xu <haibo1.xu@intel.com>
> ---
>  Documentation/virt/kvm/api.rst |   2 +-
>  arch/riscv/kvm/vcpu.c          | 346 +++++++++++++++++++++++++++++++++
>  2 files changed, 347 insertions(+), 1 deletion(-)
> 
> diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
> index add067793b90..280e89abd004 100644
> --- a/Documentation/virt/kvm/api.rst
> +++ b/Documentation/virt/kvm/api.rst
> @@ -3499,7 +3499,7 @@ VCPU matching underlying host.
>  ---------------------
>  
>  :Capability: basic
> -:Architectures: arm64, mips
> +:Architectures: arm64, mips, riscv
>  :Type: vcpu ioctl
>  :Parameters: struct kvm_reg_list (in/out)
>  :Returns: 0 on success; -1 on error
> diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> index 8bd9f2a8a0b9..fb8834e4fa15 100644
> --- a/arch/riscv/kvm/vcpu.c
> +++ b/arch/riscv/kvm/vcpu.c
> @@ -657,6 +657,334 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
>  	return 0;
>  }
>  
> +static inline unsigned long num_config_regs(void)
> +{
> +	return sizeof(struct kvm_riscv_config) / sizeof(unsigned long);
> +}
> +
> +static int copy_config_reg_indices(u64 __user *uindices)
> +{
> +	unsigned int i;
> +	int n = num_config_regs();
> +
> +	for (i = 0; i < n; i++) {
> +		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CONFIG | i;
                                          ^ this should be size-ulong

  u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
  u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;
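
On riscv32 these registers are unsigned long, i.e. 32 bits wide, and the
get/set handlers validate the ID's size field against sizeof(unsigned long),
so a U64-encoded index couldn't actually be used with KVM_GET/SET_ONE_REG
there. Since the same fix recurs at every call site flagged below, a shared
helper along these lines (name purely illustrative) may be worth having:

  /* Sketch: the KVM_REG_SIZE_* field matching an unsigned long. */
  static inline u64 kvm_reg_size_ulong(void)
  {
  	return IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
  }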

> +
> +		if (uindices) {
> +			if (put_user(reg, uindices))
> +				return -EFAULT;
> +			uindices++;
> +		}
> +	}
> +
> +	return n;
> +}
> +
> +static inline unsigned long num_core_regs(void)
> +{
> +	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
> +}
> +
> +static int copy_core_reg_indices(u64 __user *uindices)
> +{
> +	unsigned int i;
> +	int n = num_core_regs();
> +
> +	for (i = 0; i < n; i++) {
> +		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CORE | i;
                                          ^ size-ulong

> +
> +		if (uindices) {
> +			if (put_user(reg, uindices))
> +				return -EFAULT;
> +			uindices++;
> +		}
> +	}
> +
> +	return n;
> +}
> +
> +static inline unsigned long num_csr_regs(void)
> +{
> +	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
> +
> +	if (kvm_riscv_aia_available())
> +		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
> +
> +	return n;
> +}
> +
> +static int copy_csr_reg_indices(u64 __user *uindices)
> +{
> +	unsigned int i;
> +	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
> +	int n2 = 0;
> +
> +	/* copy general csr regs */
> +	for (i = 0; i < n1; i++) {
> +		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CSR |
                                          ^ size-ulong

> +				  KVM_REG_RISCV_CSR_GENERAL | i;
> +
> +		if (uindices) {
> +			if (put_user(reg, uindices))
> +				return -EFAULT;
> +			uindices++;
> +		}
> +	}
> +
> +	/* copy AIA csr regs */
> +	if (kvm_riscv_aia_available()) {
> +		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
> +
> +		for (i = 0; i < n2; i++) {
> +			u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CSR |
                                                  ^ size-ulong

> +					  KVM_REG_RISCV_CSR_AIA | i;
> +
> +			if (uindices) {
> +				if (put_user(reg, uindices))
> +					return -EFAULT;
> +				uindices++;
> +			}
> +		}
> +	}
> +
> +	return n1 + n2;
> +}
> +
> +static inline unsigned long num_timer_regs(void)
> +{
> +	return sizeof(struct kvm_riscv_timer) / sizeof(unsigned long);
> +}
> +
> +static int copy_timer_reg_indices(u64 __user *uindices)
> +{
> +	unsigned int i;
> +	int n = num_timer_regs();
> +
> +	for (i = 0; i < n; i++) {
> +		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | i;
> +
> +		if (uindices) {
> +			if (put_user(reg, uindices))
> +				return -EFAULT;
> +			uindices++;
> +		}
> +	}
> +
> +	return n;
> +}
> +
> +static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
> +{
> +	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
> +
> +	if (riscv_isa_extension_available(vcpu->arch.isa, f))
> +		return sizeof(cntx->fp.f) / sizeof(u32);
> +	else
> +		return 0;
> +}
> +
> +static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
> +					u64 __user *uindices)
> +{
> +	unsigned int i;
> +	int n = num_fp_f_regs(vcpu);
> +
> +	for (i = 0; i < n; i++) {
> +		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | i;
> +
> +		if (uindices) {
> +			if (put_user(reg, uindices))
> +				return -EFAULT;
> +			uindices++;
> +		}
> +	}
> +
> +	return n;
> +}
> +
> +static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
> +{
> +	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
> +
> +	if (riscv_isa_extension_available(vcpu->arch.isa, d))
> +		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
> +	else
> +		return 0;
> +}
> +
> +static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
> +					u64 __user *uindices)
> +{
> +	unsigned int i;
> +	int n = num_fp_d_regs(vcpu);
> +	u64 reg;
> +
> +	/* copy fp.d.f indeices */

indices

> +	for (i = 0; i < n-1; i++) {
> +		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | i;
> +
> +		if (uindices) {
> +			if (put_user(reg, uindices))
> +				return -EFAULT;
> +			uindices++;
> +		}
> +	}
> +
> +	/* copy fp.d.fcsr indeices */

indices

> +	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
> +	if (uindices) {
> +		if (put_user(reg, uindices))
> +			return -EFAULT;
> +	}
> +
> +	return n;
> +}
> +
> +static inline unsigned long num_isa_ext_regs(void)
> +{
> +	return KVM_RISCV_ISA_EXT_MAX;
> +}
> +
> +static int copy_isa_ext_reg_indices(u64 __user *uindices)
> +{
> +	unsigned int i;
> +	int n = num_isa_ext_regs();
> +
> +	for (i = 0; i < n; i++) {
> +		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_ISA_EXT | i;
                                          ^ size-ulong

> +
> +		if (uindices) {
> +			if (put_user(reg, uindices))
> +				return -EFAULT;
> +			uindices++;
> +		}
> +	}
> +
> +	return n;
> +}
> +
> +static inline unsigned long num_sbi_ext_regs(void)
> +{
> +	/* number of KVM_REG_RISCV_SBI_SINGLE +
> +	 *  2x(number of KVM_REG_RISCV_SBI_MULTI)
> +	 */

Please use an opening wing '/*' on comments.
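
I.e., for the comment above:

  /*
   * number of KVM_REG_RISCV_SBI_SINGLE +
   * 2 * (number of KVM_REG_RISCV_SBI_MULTI)
   */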

> +	return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
> +}
> +
> +static int copy_sbi_ext_reg_indices(u64 __user *uindices)
> +{
> +	unsigned int i;
> +	int n;
> +
> +	/* copy KVM_REG_RISCV_SBI_SINGLE */
> +	n = KVM_RISCV_SBI_EXT_MAX;
> +	for (i = 0; i < n; i++) {
> +		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_SBI_EXT |
                                          ^ size-ulong

> +				  KVM_REG_RISCV_SBI_SINGLE | i;
> +
> +		if (uindices) {
> +			if (put_user(reg, uindices))
> +				return -EFAULT;
> +			uindices++;
> +		}
> +	}
> +
> +	/* copy KVM_REG_RISCV_SBI_MULTI */
> +	n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
> +	for (i = 0; i < n; i++) {
> +		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_SBI_EXT |
                                          ^ size-ulong

> +				  KVM_REG_RISCV_SBI_MULTI_EN | i;
> +
> +		if (uindices) {
> +			if (put_user(reg, uindices))
> +				return -EFAULT;
> +			uindices++;
> +		}
> +
> +		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_SBI_EXT |
> +			  KVM_REG_RISCV_SBI_MULTI_DIS | i;
> +
> +		if (uindices) {
> +			if (put_user(reg, uindices))
> +				return -EFAULT;
> +			uindices++;
> +		}
> +	}
> +
> +	return num_sbi_ext_regs();
> +}
> +
> +/**
> + * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
> + *
> + * This is for all registers.
> + */
> +static unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
> +{
> +	unsigned long res = 0;
> +
> +	res += num_config_regs();
> +	res += num_core_regs();
> +	res += num_csr_regs();
> +	res += num_timer_regs();
> +	res += num_fp_f_regs(vcpu);
> +	res += num_fp_d_regs(vcpu);
> +	res += num_isa_ext_regs();
> +	res += num_sbi_ext_regs();
> +
> +	return res;
> +}
> +
> +/**
> + * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
> + */
> +static int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
> +				u64 __user *uindices)
> +{
> +	int ret;
> +
> +	ret = copy_config_reg_indices(uindices);
> +	if (ret < 0)
> +		return ret;
> +	uindices += ret;
> +
> +	ret = copy_core_reg_indices(uindices);
> +	if (ret < 0)
> +		return ret;
> +	uindices += ret;
> +
> +	ret = copy_csr_reg_indices(uindices);
> +	if (ret < 0)
> +		return ret;
> +	uindices += ret;
> +
> +	ret = copy_timer_reg_indices(uindices);
> +	if (ret < 0)
> +		return ret;
> +	uindices += ret;
> +
> +	ret = copy_fp_f_reg_indices(vcpu, uindices);
> +	if (ret < 0)
> +		return ret;
> +	uindices += ret;
> +
> +	ret = copy_fp_d_reg_indices(vcpu, uindices);
> +	if (ret < 0)
> +		return ret;
> +	uindices += ret;
> +
> +	ret = copy_isa_ext_reg_indices(uindices);
> +	if (ret < 0)
> +		return ret;
> +	uindices += ret;
> +
> +	ret = copy_sbi_ext_reg_indices(uindices);
> +	if (ret < 0)
> +		return ret;
> +
> +	return 0;
> +}
> +
>  static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
>  				  const struct kvm_one_reg *reg)
>  {
> @@ -758,6 +1086,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
>  			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
>  		break;
>  	}
> +	case KVM_GET_REG_LIST: {
> +		struct kvm_reg_list __user *user_list = argp;
> +		struct kvm_reg_list reg_list;
> +		unsigned int n;
> +
> +		r = -EFAULT;
> +		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
> +			break;
> +		n = reg_list.n;
> +		reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
> +		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
> +			break;
> +		r = -E2BIG;
> +		if (n < reg_list.n)
> +			break;
> +		r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
> +		break;
> +	}
>  	default:
>  		break;
>  	}
> -- 
> 2.34.1
>

Otherwise,

Reviewed-by: Andrew Jones <ajones@ventanamicro.com>

Thanks,
drew
Andrew Jones May 11, 2023, 4:40 p.m. UTC | #2
On Thu, May 11, 2023 at 05:22:48PM +0800, Haibo Xu wrote:
> KVM_GET_REG_LIST API will return all registers that are available to
> KVM_GET/SET_ONE_REG APIs. It's very useful to identify some platform
> regression issue during VM migration.
> 
> Since this API was already supported on arm64, it'd be straightforward
> to enable it on riscv with similar code structure.
> 
> Signed-off-by: Haibo Xu <haibo1.xu@intel.com>
> ---
>  Documentation/virt/kvm/api.rst |   2 +-
>  arch/riscv/kvm/vcpu.c          | 346 +++++++++++++++++++++++++++++++++
>  2 files changed, 347 insertions(+), 1 deletion(-)
> 
> diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
> index add067793b90..280e89abd004 100644
> --- a/Documentation/virt/kvm/api.rst
> +++ b/Documentation/virt/kvm/api.rst
> @@ -3499,7 +3499,7 @@ VCPU matching underlying host.
>  ---------------------
>  
>  :Capability: basic
> -:Architectures: arm64, mips
> +:Architectures: arm64, mips, riscv
>  :Type: vcpu ioctl
>  :Parameters: struct kvm_reg_list (in/out)
>  :Returns: 0 on success; -1 on error
> diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> index 8bd9f2a8a0b9..fb8834e4fa15 100644
> --- a/arch/riscv/kvm/vcpu.c
> +++ b/arch/riscv/kvm/vcpu.c
> @@ -657,6 +657,334 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
>  	return 0;
>  }
>  
> +static inline unsigned long num_config_regs(void)
> +{
> +	return sizeof(struct kvm_riscv_config) / sizeof(unsigned long);

We can't assume all config registers are present. For example,
zicbom and zicboz block size registers are only present when their
respective extensions are available.
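
A sketch of that filtering, assuming the KVM_REG_RISCV_CONFIG_REG()
field-to-index macro and the Zicbom/Zicboz extension IDs already in
arch/riscv (illustrative only; num_config_regs() would need the same
checks so the count and the copied indices stay in sync):

  static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
  				   u64 __user *uindices)
  {
  	u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
  	int i, n = sizeof(struct kvm_riscv_config) / sizeof(unsigned long);
  	int copied = 0;

  	for (i = 0; i < n; i++) {
  		/* Skip block-size regs whose extension is absent. */
  		if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
  		    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
  			continue;
  		if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
  		    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
  			continue;

  		if (uindices) {
  			u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

  			if (put_user(reg, uindices))
  				return -EFAULT;
  			uindices++;
  		}
  		copied++;
  	}

  	return copied;
  }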

Thanks,
drew
Conor Dooley May 11, 2023, 10:25 p.m. UTC | #3
On Thu, May 11, 2023 at 05:22:48PM +0800, Haibo Xu wrote:
> KVM_GET_REG_LIST API will return all registers that are available to
> KVM_GET/SET_ONE_REG APIs. It's very useful to identify some platform
> regression issue during VM migration.
> 
> Since this API was already supported on arm64, it'd be straightforward
> to enable it on riscv with similar code structure.

Applied on top of v6.4-rc1, this breaks the build :/

warning: Function parameter or member 'vcpu' not described in 'kvm_riscv_vcpu_num_regs'
warning: Function parameter or member 'uindices' not described in 'kvm_riscv_vcpu_copy_reg_indices'
warning: Function parameter or member 'vcpu' not described in 'kvm_riscv_vcpu_copy_reg_indices'

You have a bunch of kerneldoc comments (the ones with /**) that are not
valid kerneldoc. Apparently allmodconfig catches that!
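
For reference, valid kerneldoc for the two functions would look something
like this (wording illustrative):

  /**
   * kvm_riscv_vcpu_num_regs() - count the registers presented via
   *                             KVM_GET/SET_ONE_REG
   * @vcpu: the vCPU whose registers are being counted
   *
   * Return: total number of register indices across all groups.
   */

  /**
   * kvm_riscv_vcpu_copy_reg_indices() - copy the indices of all registers
   * @vcpu: the vCPU whose registers are listed
   * @uindices: userspace buffer receiving the register IDs
   *
   * Return: 0 on success, a negative error code on failure.
   */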

Cheers,
Conor.
Conor Dooley May 11, 2023, 10:48 p.m. UTC | #4
On Thu, May 11, 2023 at 11:25:41PM +0100, Conor Dooley wrote:
> On Thu, May 11, 2023 at 05:22:48PM +0800, Haibo Xu wrote:
> > KVM_GET_REG_LIST API will return all registers that are available to
> > KVM_GET/SET_ONE_REG APIs. It's very useful to identify some platform
> > regression issue during VM migration.
> > 
> > Since this API was already supported on arm64, it'd be straightforward
> > to enable it on riscv with similar code structure.
> 
> Applied on top of v6.4-rc1 this breaks the build :/

I lied, I forgot W=1 is enabled for the allmodconfig builds in the
patchwork automation.
The warnings are trivial to fix, so you should fix them anyway!

> warning: Function parameter or member 'vcpu' not described in 'kvm_riscv_vcpu_num_regs'
> warning: Function parameter or member 'uindices' not described in 'kvm_riscv_vcpu_copy_reg_indices'
> warning: Function parameter or member 'vcpu' not described in 'kvm_riscv_vcpu_copy_reg_indices'
> 
> You have a bunch of kerneldoc comments (the ones with /**) that are not
> valid kerneldoc. Apparently allmodconfig catches that!
> 
> Cheers,
> Conor.
Haibo Xu May 15, 2023, 6:21 a.m. UTC | #5
On Thu, May 11, 2023 at 6:44 PM Andrew Jones <ajones@ventanamicro.com> wrote:
>
> On Thu, May 11, 2023 at 05:22:48PM +0800, Haibo Xu wrote:
> > KVM_GET_REG_LIST API will return all registers that are available to
> > KVM_GET/SET_ONE_REG APIs. It's very useful to identify some platform
> > regression issue during VM migration.
>
> [...]
>
> Otherwise,
>
> Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
>
> Thanks,
> drew

Thanks for your review, Andrew! The comments will be addressed in the next version.
Haibo Xu May 15, 2023, 6:23 a.m. UTC | #6
On Fri, May 12, 2023 at 12:40 AM Andrew Jones <ajones@ventanamicro.com> wrote:
>
> On Thu, May 11, 2023 at 05:22:48PM +0800, Haibo Xu wrote:
> > KVM_GET_REG_LIST API will return all registers that are available to
> > KVM_GET/SET_ONE_REG APIs. It's very useful to identify some platform
> > regression issue during VM migration.
> >
> > Since this API was already supported on arm64, it'd be straightforward
> > to enable it on riscv with similar code structure.
> >
> > Signed-off-by: Haibo Xu <haibo1.xu@intel.com>
> > [...]
> >
> > +static inline unsigned long num_config_regs(void)
> > +{
> > +     return sizeof(struct kvm_riscv_config) / sizeof(unsigned long);
>
> We can't assume all config registers are present. For example,
> zicbom and zicboz block size registers are only present when their
> respective extensions are available.
>
> Thanks,
> drew

Yes, I will filter out these kinds of registers in the next version.

Thanks,
Haibo
Haibo Xu May 15, 2023, 6:24 a.m. UTC | #7
On Fri, May 12, 2023 at 6:48 AM Conor Dooley <conor@kernel.org> wrote:
>
> On Thu, May 11, 2023 at 11:25:41PM +0100, Conor Dooley wrote:
> > On Thu, May 11, 2023 at 05:22:48PM +0800, Haibo Xu wrote:
> > > KVM_GET_REG_LIST API will return all registers that are available to
> > > KVM_GET/SET_ONE_REG APIs. It's very useful to identify some platform
> > > regression issue during VM migration.
> > >
> > > Since this API was already supported on arm64, it'd be straightforward
> > > to enable it on riscv with similar code structure.
> >
> > Applied on top of v6.4-rc1 this breaks the build :/
>
> I lied, I forgot W=1 is enabled for the allmodconfig builds in the
> patchwork automation.
> The warnings are trivial to fix, so you should fix them anyway!
>

Sure, I will fix them in the next version.

Thanks,
Haibo

> > warning: Function parameter or member 'vcpu' not described in 'kvm_riscv_vcpu_num_regs'
> > warning: Function parameter or member 'uindices' not described in 'kvm_riscv_vcpu_copy_reg_indices'
> > warning: Function parameter or member 'vcpu' not described in 'kvm_riscv_vcpu_copy_reg_indices'
> >
> > You have a bunch of kerneldoc comments (the ones with /**) that are not
> > valid kerneldoc. Apparently allmodconfig catches that!
> >
> > Cheers,
> > Conor.
>
>
Patch

diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index add067793b90..280e89abd004 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -3499,7 +3499,7 @@  VCPU matching underlying host.
 ---------------------
 
 :Capability: basic
-:Architectures: arm64, mips
+:Architectures: arm64, mips, riscv
 :Type: vcpu ioctl
 :Parameters: struct kvm_reg_list (in/out)
 :Returns: 0 on success; -1 on error
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 8bd9f2a8a0b9..fb8834e4fa15 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -657,6 +657,334 @@  static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static inline unsigned long num_config_regs(void)
+{
+	return sizeof(struct kvm_riscv_config) / sizeof(unsigned long);
+}
+
+static int copy_config_reg_indices(u64 __user *uindices)
+{
+	unsigned int i;
+	int n = num_config_regs();
+
+	for (i = 0; i < n; i++) {
+		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CONFIG | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	return n;
+}
+
+static inline unsigned long num_core_regs(void)
+{
+	return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
+}
+
+static int copy_core_reg_indices(u64 __user *uindices)
+{
+	unsigned int i;
+	int n = num_core_regs();
+
+	for (i = 0; i < n; i++) {
+		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CORE | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	return n;
+}
+
+static inline unsigned long num_csr_regs(void)
+{
+	unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+
+	if (kvm_riscv_aia_available())
+		n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+	return n;
+}
+
+static int copy_csr_reg_indices(u64 __user *uindices)
+{
+	unsigned int i;
+	int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+	int n2 = 0;
+
+	/* copy general csr regs */
+	for (i = 0; i < n1; i++) {
+		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CSR |
+				  KVM_REG_RISCV_CSR_GENERAL | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	/* copy AIA csr regs */
+	if (kvm_riscv_aia_available()) {
+		n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+		for (i = 0; i < n2; i++) {
+			u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CSR |
+					  KVM_REG_RISCV_CSR_AIA | i;
+
+			if (uindices) {
+				if (put_user(reg, uindices))
+					return -EFAULT;
+				uindices++;
+			}
+		}
+	}
+
+	return n1 + n2;
+}
+
+static inline unsigned long num_timer_regs(void)
+{
+	return sizeof(struct kvm_riscv_timer) / sizeof(unsigned long);
+}
+
+static int copy_timer_reg_indices(u64 __user *uindices)
+{
+	unsigned int i;
+	int n = num_timer_regs();
+
+	for (i = 0; i < n; i++) {
+		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	return n;
+}
+
+static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
+{
+	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+	if (riscv_isa_extension_available(vcpu->arch.isa, f))
+		return sizeof(cntx->fp.f) / sizeof(u32);
+	else
+		return 0;
+}
+
+static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
+					u64 __user *uindices)
+{
+	unsigned int i;
+	int n = num_fp_f_regs(vcpu);
+
+	for (i = 0; i < n; i++) {
+		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	return n;
+}
+
+static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
+{
+	const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+	if (riscv_isa_extension_available(vcpu->arch.isa, d))
+		return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
+	else
+		return 0;
+}
+
+static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
+					u64 __user *uindices)
+{
+	unsigned int i;
+	int n = num_fp_d_regs(vcpu);
+	u64 reg;
+
+	/* copy fp.d.f indeices */
+	for (i = 0; i < n-1; i++) {
+		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	/* copy fp.d.fcsr indeices */
+	reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
+	if (uindices) {
+		if (put_user(reg, uindices))
+			return -EFAULT;
+	}
+
+	return n;
+}
+
+static inline unsigned long num_isa_ext_regs(void)
+{
+	return KVM_RISCV_ISA_EXT_MAX;
+}
+
+static int copy_isa_ext_reg_indices(u64 __user *uindices)
+{
+	unsigned int i;
+	int n = num_isa_ext_regs();
+
+	for (i = 0; i < n; i++) {
+		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_ISA_EXT | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	return n;
+}
+
+static inline unsigned long num_sbi_ext_regs(void)
+{
+	/* number of KVM_REG_RISCV_SBI_SINGLE +
+	 *  2x(number of KVM_REG_RISCV_SBI_MULTI)
+	 */
+	return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
+}
+
+static int copy_sbi_ext_reg_indices(u64 __user *uindices)
+{
+	unsigned int i;
+	int n;
+
+	/* copy KVM_REG_RISCV_SBI_SINGLE */
+	n = KVM_RISCV_SBI_EXT_MAX;
+	for (i = 0; i < n; i++) {
+		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_SBI_EXT |
+				  KVM_REG_RISCV_SBI_SINGLE | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	/* copy KVM_REG_RISCV_SBI_MULTI */
+	n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
+	for (i = 0; i < n; i++) {
+		u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_SBI_EXT |
+				  KVM_REG_RISCV_SBI_MULTI_EN | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+
+		reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_SBI_EXT |
+			  KVM_REG_RISCV_SBI_MULTI_DIS | i;
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+	}
+
+	return num_sbi_ext_regs();
+}
+
+/**
+ * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
+ *
+ * This is for all registers.
+ */
+static unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
+{
+	unsigned long res = 0;
+
+	res += num_config_regs();
+	res += num_core_regs();
+	res += num_csr_regs();
+	res += num_timer_regs();
+	res += num_fp_f_regs(vcpu);
+	res += num_fp_d_regs(vcpu);
+	res += num_isa_ext_regs();
+	res += num_sbi_ext_regs();
+
+	return res;
+}
+
+/**
+ * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
+ */
+static int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+				u64 __user *uindices)
+{
+	int ret;
+
+	ret = copy_config_reg_indices(uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_core_reg_indices(uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_csr_reg_indices(uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_timer_reg_indices(uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_fp_f_reg_indices(vcpu, uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_fp_d_reg_indices(vcpu, uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_isa_ext_reg_indices(uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_sbi_ext_reg_indices(uindices);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
 static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
 				  const struct kvm_one_reg *reg)
 {
@@ -758,6 +1086,24 @@  long kvm_arch_vcpu_ioctl(struct file *filp,
 			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
 		break;
 	}
+	case KVM_GET_REG_LIST: {
+		struct kvm_reg_list __user *user_list = argp;
+		struct kvm_reg_list reg_list;
+		unsigned int n;
+
+		r = -EFAULT;
+		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+			break;
+		n = reg_list.n;
+		reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
+		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+			break;
+		r = -E2BIG;
+		if (n < reg_list.n)
+			break;
+		r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
+		break;
+	}
 	default:
 		break;
 	}