[v7,03/12] xen/arm: Expose SVE feature to the guest

Message ID: 20230523074326.3035745-4-luca.fancellu@arm.com
State: Superseded
Series: SVE feature for arm guests

Commit Message

Luca Fancellu May 23, 2023, 7:43 a.m. UTC
When a guest is allowed to use SVE, expose the SVE features through
the identification registers.

Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
Acked-by: Julien Grall <jgrall@amazon.com>
---
Changes from v6:
 - code style fix, add A-by Julien
Changes from v5:
 - given the move of is_sve_domain() in asm/arm64/sve.h, add the
   header to vsysreg.c
 - dropping Bertrand's R-by because of the change
Changes from v4:
 - no changes
Changes from v3:
 - no changes
Changes from v2:
 - no changes
Changes from v1:
 - No changes
Changes from RFC:
 - No changes
---
 xen/arch/arm/arm64/vsysreg.c | 41 ++++++++++++++++++++++++++++++++++--
 1 file changed, 39 insertions(+), 2 deletions(-)
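
For context, here is a minimal guest-side sketch, not part of the patch, of how a domain would observe the exposure added here. The read of ID_AA64PFR0_EL1 at EL1 traps to Xen via HCR_EL2.TID3 and is answered by do_sysreg() in vsysreg.c, so the SVE field (bits [35:32]) is non-zero only when the domain was created with SVE enabled. The function and macro names below are made up for the illustration.

#include <stdbool.h>
#include <stdint.h>

/* Field position of ID_AA64PFR0_EL1.SVE (bits [35:32]). */
#define PFR0_SVE_SHIFT 32
#define PFR0_SVE_WIDTH 4

static inline bool guest_sees_sve(void)
{
    uint64_t pfr0;

    /* Trapped by HCR_EL2.TID3 and emulated by Xen's do_sysreg(). */
    asm volatile("mrs %0, id_aa64pfr0_el1" : "=r" (pfr0));

    /* A non-zero SVE field means SVE is advertised to this guest. */
    return ((pfr0 >> PFR0_SVE_SHIFT) &
            ((UINT64_C(1) << PFR0_SVE_WIDTH) - 1)) != 0;
}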

Comments

Bertrand Marquis May 24, 2023, 9:25 a.m. UTC | #1
Hi Luca,

> On 23 May 2023, at 09:43, Luca Fancellu <Luca.Fancellu@arm.com> wrote:
> 
> When a guest is allowed to use SVE, expose the SVE features through
> the identification registers.
> 
> Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
> Acked-by: Julien Grall <jgrall@amazon.com>

Reviewed-by: Bertrand Marquis <bertrand.marquis@arm.com>

Cheers
Bertrand

Patch

diff --git a/xen/arch/arm/arm64/vsysreg.c b/xen/arch/arm/arm64/vsysreg.c
index 758750983c11..fe31f7b3827f 100644
--- a/xen/arch/arm/arm64/vsysreg.c
+++ b/xen/arch/arm/arm64/vsysreg.c
@@ -18,6 +18,8 @@ 
 
 #include <xen/sched.h>
 
+#include <asm/arm64/cpufeature.h>
+#include <asm/arm64/sve.h>
 #include <asm/current.h>
 #include <asm/regs.h>
 #include <asm/traps.h>
@@ -295,7 +297,28 @@  void do_sysreg(struct cpu_user_regs *regs,
     GENERATE_TID3_INFO(MVFR0_EL1, mvfr, 0)
     GENERATE_TID3_INFO(MVFR1_EL1, mvfr, 1)
     GENERATE_TID3_INFO(MVFR2_EL1, mvfr, 2)
-    GENERATE_TID3_INFO(ID_AA64PFR0_EL1, pfr64, 0)
+
+    case HSR_SYSREG_ID_AA64PFR0_EL1:
+    {
+        register_t guest_reg_value = guest_cpuinfo.pfr64.bits[0];
+
+        if ( is_sve_domain(v->domain) )
+        {
+            /* 4 is the SVE field width in id_aa64pfr0_el1 */
+            uint64_t mask = GENMASK(ID_AA64PFR0_SVE_SHIFT + 4 - 1,
+                                    ID_AA64PFR0_SVE_SHIFT);
+            /* sysval is the sve field on the system */
+            uint64_t sysval = cpuid_feature_extract_unsigned_field_width(
+                                system_cpuinfo.pfr64.bits[0],
+                                ID_AA64PFR0_SVE_SHIFT, 4);
+            guest_reg_value &= ~mask;
+            guest_reg_value |= (sysval << ID_AA64PFR0_SVE_SHIFT) & mask;
+        }
+
+        return handle_ro_read_val(regs, regidx, hsr.sysreg.read, hsr, 1,
+                                  guest_reg_value);
+    }
+
     GENERATE_TID3_INFO(ID_AA64PFR1_EL1, pfr64, 1)
     GENERATE_TID3_INFO(ID_AA64DFR0_EL1, dbg64, 0)
     GENERATE_TID3_INFO(ID_AA64DFR1_EL1, dbg64, 1)
@@ -306,7 +329,21 @@  void do_sysreg(struct cpu_user_regs *regs,
     GENERATE_TID3_INFO(ID_AA64MMFR2_EL1, mm64, 2)
     GENERATE_TID3_INFO(ID_AA64AFR0_EL1, aux64, 0)
     GENERATE_TID3_INFO(ID_AA64AFR1_EL1, aux64, 1)
-    GENERATE_TID3_INFO(ID_AA64ZFR0_EL1, zfr64, 0)
+
+    case HSR_SYSREG_ID_AA64ZFR0_EL1:
+    {
+        /*
+         * When the guest has the SVE feature enabled, the whole id_aa64zfr0_el1
+         * needs to be exposed.
+         */
+        register_t guest_reg_value = guest_cpuinfo.zfr64.bits[0];
+
+        if ( is_sve_domain(v->domain) )
+            guest_reg_value = system_cpuinfo.zfr64.bits[0];
+
+        return handle_ro_read_val(regs, regidx, hsr.sysreg.read, hsr, 1,
+                                  guest_reg_value);
+    }
 
     /*
      * Those cases are catching all Reserved registers trapped by TID3 which
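
As a standalone illustration of the splice performed in the ID_AA64PFR0_EL1 case above: the sanitised guest view is the base value, and only for an SVE-enabled domain is the 4-bit SVE field overwritten with the value observed on the host. This is not Xen code; GENMASK() and cpuid_feature_extract_unsigned_field_width() are approximated with plain C, and the names are made up for the sketch.

#include <stdbool.h>
#include <stdint.h>

#define SVE_SHIFT 32
#define SVE_WIDTH 4

static uint64_t splice_sve_field(uint64_t guest_pfr0, uint64_t host_pfr0,
                                 bool sve_enabled)
{
    if ( sve_enabled )
    {
        /* 4-bit mask at the SVE field position, like GENMASK(35, 32). */
        uint64_t mask = ((UINT64_C(1) << SVE_WIDTH) - 1) << SVE_SHIFT;
        /* Extract the SVE field as seen on the system. */
        uint64_t sysval = (host_pfr0 & mask) >> SVE_SHIFT;

        /* Clear the guest's SVE field and copy in the system value. */
        guest_pfr0 &= ~mask;
        guest_pfr0 |= (sysval << SVE_SHIFT) & mask;
    }

    return guest_pfr0;
}

/*
 * Example: host advertises SVE (field == 1), sanitised guest view is 0:
 * splice_sve_field(0, UINT64_C(1) << 32, true) == UINT64_C(1) << 32
 */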