[v2,2/8] KVM: arm64: Remove host FPSIMD saving for non-protected KVM

Message ID: 20250206141102.954688-3-mark.rutland@arm.com
State: New
Series: KVM: arm64: FPSIMD/SVE/SME fixes

Commit Message

Mark Rutland Feb. 6, 2025, 2:10 p.m. UTC
Now that the host eagerly saves its own FPSIMD/SVE/SME state,
non-protected KVM never needs to save the host FPSIMD/SVE/SME state,
and the code to do this is never used. Protected KVM still needs to
save/restore the host FPSIMD/SVE state to avoid leaking guest state to
the host (and to avoid revealing to the host whether the guest used
FPSIMD/SVE/SME), and that code needs to be retained.

Remove the unused code and data structures.

To avoid the need for a stub copy of kvm_hyp_save_fpsimd_host() in the
VHE hyp code, the nVHE/hVHE version is moved into the shared switch
header, where it is only invoked when KVM is in protected mode.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Oliver Upton <oliver.upton@linux.dev>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h       | 20 +++++-------------
 arch/arm64/kvm/arm.c                    |  8 -------
 arch/arm64/kvm/fpsimd.c                 |  2 --
 arch/arm64/kvm/hyp/include/hyp/switch.h | 25 ++++++++++++++++++++--
 arch/arm64/kvm/hyp/nvhe/hyp-main.c      |  2 +-
 arch/arm64/kvm/hyp/nvhe/switch.c        | 28 -------------------------
 arch/arm64/kvm/hyp/vhe/switch.c         |  8 -------
 7 files changed, 29 insertions(+), 64 deletions(-)

Comments

Will Deacon Feb. 10, 2025, 4:12 p.m. UTC | #1
On Thu, Feb 06, 2025 at 02:10:56PM +0000, Mark Rutland wrote:
> Now that the host eagerly saves its own FPSIMD/SVE/SME state,
> non-protected KVM never needs to save the host FPSIMD/SVE/SME state,
> and the code to do this is never used. Protected KVM still needs to
> save/restore the host FPSIMD/SVE state to avoid leaking guest state to
> the host (and to avoid revealing to the host whether the guest used
> FPSIMD/SVE/SME), and that code needs to be retained.
> 
> Remove the unused code and data structures.
> 
> To avoid the need for a stub copy of kvm_hyp_save_fpsimd_host() in the
> VHE hyp code, the nVHE/hVHE version is moved into the shared switch
> header, where it is only invoked when KVM is in protected mode.
> 
> Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> Reviewed-by: Mark Brown <broonie@kernel.org>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Fuad Tabba <tabba@google.com>
> Cc: Marc Zyngier <maz@kernel.org>
> Cc: Mark Brown <broonie@kernel.org>
> Cc: Oliver Upton <oliver.upton@linux.dev>
> Cc: Will Deacon <will@kernel.org>
> ---
>  arch/arm64/include/asm/kvm_host.h       | 20 +++++-------------
>  arch/arm64/kvm/arm.c                    |  8 -------
>  arch/arm64/kvm/fpsimd.c                 |  2 --
>  arch/arm64/kvm/hyp/include/hyp/switch.h | 25 ++++++++++++++++++++--
>  arch/arm64/kvm/hyp/nvhe/hyp-main.c      |  2 +-
>  arch/arm64/kvm/hyp/nvhe/switch.c        | 28 -------------------------
>  arch/arm64/kvm/hyp/vhe/switch.c         |  8 -------
>  7 files changed, 29 insertions(+), 64 deletions(-)

[...]

> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index f838a45665f26..c5b8a11ac4f50 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -375,7 +375,28 @@ static inline void __hyp_sve_save_host(void)
>  			 true);
>  }
>  
> -static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
> +static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
> +{
> +	/*
> +	 * Non-protected kvm relies on the host restoring its sve state.
> +	 * Protected kvm restores the host's sve state as not to reveal that
> +	 * fpsimd was used by a guest nor leak upper sve bits.
> +	 */
> +	if (system_supports_sve()) {
> +		__hyp_sve_save_host();
> +
> +		/* Re-enable SVE traps if not supported for the guest vcpu. */
> +		if (!vcpu_has_sve(vcpu))
> +			cpacr_clear_set(CPACR_EL1_ZEN, 0);
> +
> +	} else {
> +		__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
> +	}
> +
> +	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
> +		*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
> +}
> +
>  
>  /*
>   * We trap the first access to the FP/SIMD to save the host context and
> @@ -425,7 +446,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	isb();
>  
>  	/* Write out the host state if it's in the registers */
> -	if (host_owns_fp_regs())
> +	if (is_protected_kvm_enabled() && host_owns_fp_regs())
>  		kvm_hyp_save_fpsimd_host(vcpu);

I wondered briefly whether this would allow us to clean up the CPACR
handling a little and avoid the conditional SVE trap re-enabling inside
kvm_hyp_save_fpsimd_host() but I couldn't come up with a clean way to
do it without an additional ISB. Hrm.

Anyway, as far as the patch goes:

Acked-by: Will Deacon <will@kernel.org>

Will
Mark Rutland Feb. 10, 2025, 4:59 p.m. UTC | #2
On Mon, Feb 10, 2025 at 04:12:43PM +0000, Will Deacon wrote:
> On Thu, Feb 06, 2025 at 02:10:56PM +0000, Mark Rutland wrote:
> > [...]
> 
> > diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> > index f838a45665f26..c5b8a11ac4f50 100644
> > --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> > +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> > @@ -375,7 +375,28 @@ static inline void __hyp_sve_save_host(void)
> >  			 true);
> >  }
> >  
> > -static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
> > +static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
> > +{
> > +	/*
> > +	 * Non-protected kvm relies on the host restoring its sve state.
> > +	 * Protected kvm restores the host's sve state as not to reveal that
> > +	 * fpsimd was used by a guest nor leak upper sve bits.
> > +	 */
> > +	if (system_supports_sve()) {
> > +		__hyp_sve_save_host();
> > +
> > +		/* Re-enable SVE traps if not supported for the guest vcpu. */
> > +		if (!vcpu_has_sve(vcpu))
> > +			cpacr_clear_set(CPACR_EL1_ZEN, 0);
> > +
> > +	} else {
> > +		__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
> > +	}
> > +
> > +	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
> > +		*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
> > +}
> > +
> >  
> >  /*
> >   * We trap the first access to the FP/SIMD to save the host context and
> > @@ -425,7 +446,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
> >  	isb();
> >  
> >  	/* Write out the host state if it's in the registers */
> > -	if (host_owns_fp_regs())
> > +	if (is_protected_kvm_enabled() && host_owns_fp_regs())
> >  		kvm_hyp_save_fpsimd_host(vcpu);
> 
> I wondered briefly whether this would allow us to clean up the CPACR
> handling a little and avoid the conditional SVE trap re-enabling inside
> kvm_hyp_save_fpsimd_host() but I couldn't come up with a clean way to
> do it without an additional ISB. Hrm.
> 
> Anyway, as far as the patch goes:
> 
> Acked-by: Will Deacon <will@kernel.org>

Thanks!

FWIW, I'd also considered that, and I'd concluded that if anything we
could do a subsequent simplification by pulling that out of
kvm_hyp_save_fpsimd_host() and having kvm_hyp_handle_fpsimd() do something
like:

| static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
| {
| 	...
| 
| 	/* Valid trap */
| 
| 	/*
| 	 * Enable everything EL2 might need to save/restore state.
| 	 * Maybe each of the bits should depend on system_has_xxx()
| 	 */
| 	cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN | CPACR_EL1_SMEN);
| 	isb();
| 
| 	...
| 
| 	/* Write out the host state if it's in the registers */
| 	if (is_protected_kvm_enabled() && host_owns_fp_regs())
| 		kvm_hyp_save_fpsimd_host(vcpu);
| 	
| 	/* Restore guest state */
| 
| 	...
| 
| 	/*
| 	 * Enable traps for the VCPU. The ERET will cause the traps to
| 	 * take effect in the guest, so no ISB is necessary.
| 	 */
| 	cpacr_guest = CPACR_EL1_FPEN;
| 	if (vcpu_has_sve(vcpu))
| 		cpacr_guest |= CPACR_EL1_ZEN;
| 	if (vcpu_has_sme(vcpu))			// whenever we add this
| 		cpacr_guest |= CPACR_EL1_SMEN;
| 	cpacr_clear_set(CPACR_EL1_FPEN | CPACR_EL1_ZEN | CPACR_EL1_SMEN,
| 			cpacr_guest);
| 
| 	return true;
| }

... where we'd still have the CPACR write to re-enable traps, but it'd
be unconditional, and wouldn't need an extra ISB.

If that makes sense to you, I can go spin that as a subsequent cleanup
atop this series.

Mark.
Will Deacon Feb. 10, 2025, 6:06 p.m. UTC | #3
On Mon, Feb 10, 2025 at 04:59:56PM +0000, Mark Rutland wrote:
> On Mon, Feb 10, 2025 at 04:12:43PM +0000, Will Deacon wrote:
> > On Thu, Feb 06, 2025 at 02:10:56PM +0000, Mark Rutland wrote:
> | static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
> | {
> | 	...
> | 
> | 	/* Valid trap */
> | 
> | 	/*
> | 	 * Enable everything EL2 might need to save/restore state.
> | 	 * Maybe each of the bits should depend on system_has_xxx()
> | 	 */
> | 	cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN | CPACR_EL1_SMEN);
> | 	isb();
> | 
> | 	...
> | 
> | 	/* Write out the host state if it's in the registers */
> | 	if (is_protected_kvm_enabled() && host_owns_fp_regs())
> | 		kvm_hyp_save_fpsimd_host(vcpu);
> | 	
> | 	/* Restore guest state */
> | 
> | 	...
> | 
> | 	/*
> | 	 * Enable traps for the VCPU. The ERET will cause the traps to
> | 	 * take effect in the guest, so no ISB is necessary.
> | 	 */
> | 	cpacr_guest = CPACR_EL1_FPEN;
> | 	if (vcpu_has_sve(vcpu))
> | 		cpacr_guest |= CPACR_EL1_ZEN;
> | 	if (vcpu_has_sme(vcpu))			// whenever we add this
> | 		cpacr_guest |= CPACR_EL1_SMEN;
> | 	cpacr_clear_set(CPACR_EL1_FPEN | CPACR_EL1_ZEN | CPACR_EL1_SMEN,
> | 			cpacr_guest);
> | 
> | 	return true;
> | }
> 
> ... where we'd still have the CPACR write to re-enable traps, but it'd
> be unconditional, and wouldn't need an extra ISB.
> 
> If that makes sense to you, I can go spin that as a subsequent cleanup
> atop this series.

That looks very clean, yes please! Don't forget to drop the part from
kvm_hyp_save_fpsimd_host() too.

Will
Mark Rutland Feb. 10, 2025, 8:03 p.m. UTC | #4
On Mon, Feb 10, 2025 at 06:06:38PM +0000, Will Deacon wrote:
> On Mon, Feb 10, 2025 at 04:59:56PM +0000, Mark Rutland wrote:
> > [...]
> > 
> > ... where we'd still have the CPACR write to re-enable traps, but it'd
> > be unconditional, and wouldn't need an extra ISB.
> > 
> > If that makes sense to you, I can go spin that as a subsequent cleanup
> > atop this series.
> 
> That looks very clean, yes please! Don't forget to drop the part from
> kvm_hyp_save_fpsimd_host() too.

Yep, that was the idea!

To avoid confusion: I've sent out v3 of this series *without* the
change, and I'll prepare that as a follow-up.

Mark.
Mark Rutland Feb. 11, 2025, 7:08 p.m. UTC | #5
On Mon, Feb 10, 2025 at 05:00:04PM +0000, Mark Rutland wrote:
> | static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
> | {
> | 	...
> | 
> | 	/* Valid trap */
> | 
> | 	/*
> | 	 * Enable everything EL2 might need to save/restore state.
> | 	 * Maybe each of the bits should depend on system_has_xxx()
> | 	 */
> | 	cpacr_clear_set(0, CPACR_EL1_FPEN | CPACR_EL1_ZEN | CPACR_EL1_SMEN);
> | 	isb();
> | 
> | 	...
> | 
> | 	/* Write out the host state if it's in the registers */
> | 	if (is_protected_kvm_enabled() && host_owns_fp_regs())
> | 		kvm_hyp_save_fpsimd_host(vcpu);
> | 	
> | 	/* Restore guest state */
> | 
> | 	...
> | 
> | 	/*
> | 	 * Enable traps for the VCPU. The ERET will cause the traps to
> | 	 * take effect in the guest, so no ISB is necessary.
> | 	 */
> | 	cpacr_guest = CPACR_EL1_FPEN;
> | 	if (vcpu_has_sve(vcpu))
> | 		cpacr_guest |= CPACR_EL1_ZEN;
> | 	if (vcpu_has_sme(vcpu))			// whenever we add this
> | 		cpacr_guest |= CPACR_EL1_SMEN;
> | 	cpacr_clear_set(CPACR_EL1_FPEN | CPACR_EL1_ZEN | CPACR_EL1_SMEN,
> | 			cpacr_guest);
> | 
> | 	return true;
> | }
> 
> ... where we'd still have the CPACR write to re-enable traps, but it'd
> be unconditional, and wouldn't need an extra ISB.

I had a quick go at this, and a few things got in the way, so I'm not
going to spin it immediately, but I'd be happy to in a short while.
Notes below:

(1) I looked at using __activate_cptr_traps() and
    __deactivate_cptr_traps() rather than poking CPACR/CPTR directly,
    to ensure that this is kept in-sync with the regular guest<->host
    transitions, but that requires a bit of refactoring (e.g. moving
    those *back* into the common header), and potentially requires doing
    a bit of redundant work here, so I'm not sure whether that's
    actually preferable or not.

    If you have a strong preference either way, between doing that and
    poking CPACR/CPTR directly, knowing that would be helpful.
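
    For the sake of discussion, the shape I have in mind is roughly the
    below (a rough sketch, assuming those helpers were moved into the
    shared header and take the vcpu):

    | 	/* Open all of the traps EL2 might need for the save/restore. */
    | 	__deactivate_cptr_traps(vcpu);
    | 	isb();
    | 
    | 	/* ... save the host state / restore the guest state ... */
    | 
    | 	/*
    | 	 * Re-enable traps for the vcpu; the ERET will synchronize this,
    | 	 * so no extra ISB is necessary.
    | 	 */
    | 	__activate_cptr_traps(vcpu);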

(2) The cpacr_clear_set() function doesn't behave the same as
    sysreg_clear_set(), and doesn't handle the case where a field is in
    both the 'clr' and 'set' masks as is the case above. For example, if
    one writes the following to clear THEN set the FPEN field, disabling
    traps:

    | cpacr_clear_set(CPACR_EL1_FPEN, CPACR_EL1_FPEN);

    ... the VHE code looks good:

    | mrs     x0, cpacr_el1
    | orr     x1, x0, #0x300000		// set both FPEN bits
    | cmp     x0, x1
    | b.eq    <<skip_write>>
    | msr     cpacr_el1, x1

    ... but the nVHE code does the opposite:

    | mrs     x0, cptr_el2
    | orr     x1, x0, #0x400		// set TFP
    | tbnz    w0, #10, <<skip_write>>
    | msr     cptr_el2, x1

    Luckily this does the right thing for all existing users, but that'd
    need to be cleaned up.
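
    For clarity, the intended semantics are clear-then-set, as with
    sysreg_clear_set(). A minimal sketch of that ordering (the helper
    below is made up for illustration):

    | static inline u64 cpacr_clear_set_val(u64 val, u64 clr, u64 set)
    | {
    | 	/* Apply 'clr' first, then 'set': overlapping fields end up set. */
    | 	return (val & ~clr) | set;
    | }

    ... so cpacr_clear_set(CPACR_EL1_FPEN, CPACR_EL1_FPEN) always ends up
    with the trap disabled, for both the VHE (FPEN) and nVHE (TFP)
    encodings.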

(3) We should be able to get rid of the ISB when writing to FPEXC32_EL2.
    That register has no effect while in AArch64 state, and the ERET
    will synchronize it before AArch32 guest code can be executed.

    That should probably be factored out into save/restore functions
    that are used consistently.
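
    A sketch of the restore side (the helper name is made up, wrapping
    the existing logic in kvm_hyp_handle_fpsimd() minus the ISB):

    | static inline void __fpsimd_restore_fpexc32(struct kvm_vcpu *vcpu)
    | {
    | 	/*
    | 	 * FPEXC32_EL2 has no effect while in AArch64 state, and the
    | 	 * ERET into the guest will synchronize it before any AArch32
    | 	 * guest code can run, so no ISB is needed here.
    | 	 */
    | 	if (!(read_sysreg(hcr_el2) & HCR_RW))
    | 		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
    | }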

Mark.

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7cfa024de4e34..f56c07568591f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -624,23 +624,13 @@  struct kvm_host_data {
 	struct kvm_cpu_context host_ctxt;
 
 	/*
-	 * All pointers in this union are hyp VA.
+	 * Hyp VA.
 	 * sve_state is only used in pKVM and if system_supports_sve().
 	 */
-	union {
-		struct user_fpsimd_state *fpsimd_state;
-		struct cpu_sve_state *sve_state;
-	};
-
-	union {
-		/* HYP VA pointer to the host storage for FPMR */
-		u64	*fpmr_ptr;
-		/*
-		 * Used by pKVM only, as it needs to provide storage
-		 * for the host
-		 */
-		u64	fpmr;
-	};
+	struct cpu_sve_state *sve_state;
+
+	/* Used by pKVM only. */
+	u64	fpmr;
 
 	/* Ownership of the FP regs */
 	enum {
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 646e806c6ca69..027c8e9c4741f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2461,14 +2461,6 @@  static void finalize_init_hyp_mode(void)
 			per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state =
 				kern_hyp_va(sve_state);
 		}
-	} else {
-		for_each_possible_cpu(cpu) {
-			struct user_fpsimd_state *fpsimd_state;
-
-			fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs;
-			per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state =
-				kern_hyp_va(fpsimd_state);
-		}
 	}
 }
 
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index ceeb0a4893aa7..332cb3904e68b 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -64,8 +64,6 @@  void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 	 */
 	fpsimd_save_and_flush_cpu_state();
 	*host_data_ptr(fp_owner) = FP_STATE_FREE;
-	*host_data_ptr(fpsimd_state) = NULL;
-	*host_data_ptr(fpmr_ptr) = NULL;
 
 	host_data_clear_flag(HOST_SVE_ENABLED);
 	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index f838a45665f26..c5b8a11ac4f50 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -375,7 +375,28 @@  static inline void __hyp_sve_save_host(void)
 			 true);
 }
 
-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu);
+static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Non-protected kvm relies on the host restoring its sve state.
+	 * Protected kvm restores the host's sve state as not to reveal that
+	 * fpsimd was used by a guest nor leak upper sve bits.
+	 */
+	if (system_supports_sve()) {
+		__hyp_sve_save_host();
+
+		/* Re-enable SVE traps if not supported for the guest vcpu. */
+		if (!vcpu_has_sve(vcpu))
+			cpacr_clear_set(CPACR_EL1_ZEN, 0);
+
+	} else {
+		__fpsimd_save_state(host_data_ptr(host_ctxt.fp_regs));
+	}
+
+	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
+		*host_data_ptr(fpmr) = read_sysreg_s(SYS_FPMR);
+}
+
 
 /*
  * We trap the first access to the FP/SIMD to save the host context and
@@ -425,7 +446,7 @@  static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	isb();
 
 	/* Write out the host state if it's in the registers */
-	if (host_owns_fp_regs())
+	if (is_protected_kvm_enabled() && host_owns_fp_regs())
 		kvm_hyp_save_fpsimd_host(vcpu);
 
 	/* Restore the guest state */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 5c134520e1805..ad1abd5493862 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -83,7 +83,7 @@  static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
 	if (system_supports_sve())
 		__hyp_sve_restore_host();
 	else
-		__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
+		__fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));
 
 	if (has_fpmr)
 		write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6c846d033d24a..7a2d189176249 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -192,34 +192,6 @@  static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
 		kvm_handle_pvm_sysreg(vcpu, exit_code));
 }
 
-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
-{
-	/*
-	 * Non-protected kvm relies on the host restoring its sve state.
-	 * Protected kvm restores the host's sve state as not to reveal that
-	 * fpsimd was used by a guest nor leak upper sve bits.
-	 */
-	if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
-		__hyp_sve_save_host();
-
-		/* Re-enable SVE traps if not supported for the guest vcpu. */
-		if (!vcpu_has_sve(vcpu))
-			cpacr_clear_set(CPACR_EL1_ZEN, 0);
-
-	} else {
-		__fpsimd_save_state(*host_data_ptr(fpsimd_state));
-	}
-
-	if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
-		u64 val = read_sysreg_s(SYS_FPMR);
-
-		if (unlikely(is_protected_kvm_enabled()))
-			*host_data_ptr(fpmr) = val;
-		else
-			**host_data_ptr(fpmr_ptr) = val;
-	}
-}
-
 static const exit_handler_fn hyp_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX]		= NULL,
 	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index b5b9dbaf1fdd6..e8a07d4bb546b 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -413,14 +413,6 @@  static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return true;
 }
 
-static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
-{
-	__fpsimd_save_state(*host_data_ptr(fpsimd_state));
-
-	if (kvm_has_fpmr(vcpu->kvm))
-		**host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
-}
-
 static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	int ret = -EINVAL;