diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -102,7 +102,7 @@
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
-#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
+#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
/* TCR_EL2 Registers bits */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -355,6 +355,8 @@ enum vcpu_sysreg {
CNTP_CVAL_EL0,
CNTP_CTL_EL0,
+ FPMR,
+
/* Memory Tagging Extension registers */
RGSR_EL1, /* Random Allocation Tag Seed Register */
GCR_EL1, /* Tag Control Register */
@@ -481,7 +483,6 @@ struct kvm_vcpu_arch {
enum fp_type fp_type;
unsigned int sve_max_vl;
u64 svcr;
- u64 fpmr;
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
@@ -540,6 +541,7 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch external_debug_state;
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
+ u64 *host_fpmr; /* hyp VA */
struct task_struct *parent_task;
struct {
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -14,6 +14,16 @@
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
+static void *fpsimd_share_end(struct user_fpsimd_state *fpsimd)
+{
+ void *share_end = fpsimd + 1;
+
+ if (cpus_have_final_cap(ARM64_HAS_FPMR))
+ share_end += sizeof(u64);
+
+ return share_end;
+}
+
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
{
struct task_struct *p = vcpu->arch.parent_task;
@@ -23,7 +33,7 @@ void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
return;
fpsimd = &p->thread.uw.fpsimd_state;
- kvm_unshare_hyp(fpsimd, fpsimd + 1);
+ kvm_unshare_hyp(fpsimd, fpsimd_share_end(fpsimd));
put_task_struct(p);
}
@@ -45,11 +55,15 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
kvm_vcpu_unshare_task_fp(vcpu);
/* Make sure the host task fpsimd state is visible to hyp: */
- ret = kvm_share_hyp(fpsimd, fpsimd + 1);
+ ret = kvm_share_hyp(fpsimd, fpsimd_share_end(fpsimd));
if (ret)
return ret;
vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
+ if (cpus_have_final_cap(ARM64_HAS_FPMR)) {
+ WARN_ON_ONCE(&current->thread.fpmr + 1 != fpsimd_share_end(fpsimd));
+ vcpu->arch.host_fpmr = kern_hyp_va(&current->thread.fpmr);
+ }
/*
* We need to keep current's task_struct pinned until its data has been
@@ -153,7 +167,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
fp_state.sve_vl = vcpu->arch.sve_max_vl;
fp_state.sme_state = NULL;
fp_state.svcr = &vcpu->arch.svcr;
- fp_state.fpmr = &vcpu->arch.fpmr;
+ fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR);
fp_state.fp_type = &vcpu->arch.fp_type;
if (vcpu_has_sve(vcpu))
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -322,10 +322,15 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb();
/* Write out the host state if it's in the registers */
- if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
+ if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED) {
__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
+ if (cpus_have_final_cap(ARM64_HAS_FPMR))
+ *vcpu->arch.host_fpmr = read_sysreg_s(SYS_FPMR);
+ }
/* Restore the guest state */
+ if (cpus_have_final_cap(ARM64_HAS_FPMR))
+ write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);
if (sve_guest)
__hyp_sve_restore_guest(vcpu);
else
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1806,6 +1806,15 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
.visibility = elx2_visibility, \
}
+static unsigned int fpmr_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (cpus_have_final_cap(ARM64_HAS_FPMR))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
/*
* Since reset() callback and field val are not used for idregs, they will be
* used for specific purposes for idregs.
@@ -2165,6 +2174,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
{ SYS_DESC(SYS_SVCR), undef_access },
+ { SYS_DESC(SYS_FPMR), access_rw, reset_unknown, FPMR,
+ .visibility = fpmr_visibility },
{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
.reset = reset_pmcr, .reg = PMCR_EL0 },
FEAT_FPMR introduces a new system register FPMR which allows configuration
of floating point behaviour, currently for FP8 specific features. Allow use
of this in guests, disabling the trap while guests are running and saving
and restoring the value along with the rest of the floating point state.

Since FPMR is stored immediately after the main floating point state we
share it with the hypervisor by adjusting the size of the shared region.

Access to FPMR is covered by both a register specific trap HCRX_EL2.EnFPM
and the overall floating point access trap, so we just unconditionally
enable the FPMR specific trap and rely on the floating point access trap
to detect guest floating point usage.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/kvm_arm.h        |  2 +-
 arch/arm64/include/asm/kvm_host.h       |  4 +++-
 arch/arm64/kvm/fpsimd.c                 | 20 +++++++++++++++++---
 arch/arm64/kvm/hyp/include/hyp/switch.h |  7 ++++++-
 arch/arm64/kvm/sys_regs.c               | 11 +++++++++++
 5 files changed, 38 insertions(+), 6 deletions(-)
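
For reference, since the sys_reg_descs entry above gives FPMR a backing
register and a visibility callback, the guest's FPMR should also become
accessible to userspace through the usual ONE_REG interface on systems
with ARM64_HAS_FPMR. The snippet below is a minimal illustrative sketch,
not part of the patch: it assumes the architectural FPMR encoding
(op0=3, op1=3, CRn=4, CRm=4, op2=2) and a hypothetical vcpu_fd obtained
from KVM_CREATE_VCPU.

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Architectural FPMR encoding: op0=3, op1=3, CRn=4, CRm=4, op2=2. */
	#define REG_FPMR	ARM64_SYS_REG(3, 3, 4, 4, 2)

	/* Read the guest's FPMR; vcpu_fd is a KVM vcpu file descriptor. */
	static int vcpu_get_fpmr(int vcpu_fd, uint64_t *val)
	{
		struct kvm_one_reg reg = {
			.id	= REG_FPMR,
			.addr	= (uint64_t)val,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}

On a host without FEAT_FPMR the register is hidden by fpmr_visibility()
and the ioctl would be expected to fail rather than expose stale state.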