Message ID | 20201116204318.63987-24-dbrazdil@google.com
---|---
State | New, archived
Series | Opt-in always-on nVHE hypervisor
On Mon, 16 Nov 2020 20:43:17 +0000,
David Brazdil <dbrazdil@google.com> wrote:
>
> While protected nVHE KVM is installed, start trapping all host SMCs.
> By default, these are simply forwarded to EL3, but PSCI SMCs are
> validated first.
>
> Create new constant HCR_HOST_NVHE_PROTECTED_FLAGS with the new set of HCR
> flags to use while the nVHE vector is installed when the kernel was
> booted with the protected flag enabled. Switch back to the default HCR
> flags when switching back to the stub vector.
>
> Signed-off-by: David Brazdil <dbrazdil@google.com>
> ---
>  arch/arm64/include/asm/kvm_arm.h   |  1 +
>  arch/arm64/kvm/hyp/nvhe/hyp-init.S | 12 ++++++++++++
>  arch/arm64/kvm/hyp/nvhe/switch.c   |  5 ++++-
>  3 files changed, 17 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
> index 64ce29378467..4e90c2debf70 100644
> --- a/arch/arm64/include/asm/kvm_arm.h
> +++ b/arch/arm64/include/asm/kvm_arm.h
> @@ -80,6 +80,7 @@
>  			 HCR_FMO | HCR_IMO | HCR_PTW )
>  #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
>  #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
> +#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
>  #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
>
>  /* TCR_EL2 Registers bits */
> diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> index 6d8202d2bdfb..8f3602f320ac 100644
> --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> @@ -88,6 +88,12 @@ SYM_CODE_END(__kvm_hyp_init)
>   * x0: struct kvm_nvhe_init_params PA
>   */
>  SYM_CODE_START(___kvm_hyp_init)
> +alternative_if ARM64_PROTECTED_KVM
> +	mov_q	x1, HCR_HOST_NVHE_PROTECTED_FLAGS
> +	msr	hcr_el2, x1
> +	isb

Why the ISB? For HCR_TSC to have any effect, you'll have to go via an
ERET to EL1 first, which will have the required synchronisation effect.

> +alternative_else_nop_endif
> +
>  	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
>  	msr	tpidr_el2, x1
>
> @@ -224,6 +230,12 @@ reset:
>  	msr	sctlr_el2, x5
>  	isb
>
> +alternative_if ARM64_PROTECTED_KVM
> +	mov_q	x5, HCR_HOST_NVHE_FLAGS
> +	msr	hcr_el2, x5
> +	isb

Same thing here, I believe.

> +alternative_else_nop_endif
> +
>  	/* Install stub vectors */
>  	adr_l	x5, __hyp_stub_vectors
>  	msr	vbar_el2, x5
> diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
> index 8ae8160bc93a..e1f8e0797144 100644
> --- a/arch/arm64/kvm/hyp/nvhe/switch.c
> +++ b/arch/arm64/kvm/hyp/nvhe/switch.c
> @@ -96,7 +96,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
>  		mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
>
>  	write_sysreg(mdcr_el2, mdcr_el2);
> -	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
> +	if (is_protected_kvm_enabled())
> +		write_sysreg(HCR_HOST_NVHE_PROTECTED_FLAGS, hcr_el2);
> +	else
> +		write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
>  	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
>  	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
>  }
> --
> 2.29.2.299.gdc1121823c-goog
>

Thanks,

	M.
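As background to the HCR_TSC point above: once the flag takes effect (after the ERET back to EL1 that the review mentions), a host SMC no longer completes at EL3 directly but traps to EL2 as a synchronous exception with EC == ESR_ELx_EC_SMC64 (ESR_ELx_EC_SMC32 for AArch32 callers). The sketch below shows how the host vector's synchronous-exception handler might recognise such a trap; it is illustrative only, handle_host_sync() is a hypothetical name, and the dispatch is simplified compared to the real hyp entry code.

```c
#include <asm/esr.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>

/* Sketch: classify a synchronous exception taken to EL2 from the host. */
static void handle_host_sync(struct kvm_cpu_context *host_ctxt)
{
	u64 esr = read_sysreg_el2(SYS_ESR);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SMC64:
		/*
		 * The host executed an SMC with HCR_EL2.TSC set. Screen it
		 * and/or forward it to EL3 (see the forwarding sketch at the
		 * end of this page). A trapped SMC returns to the SMC
		 * instruction itself, so ELR_EL2 must also be advanced by 4
		 * before ERETing back to the host.
		 */
		break;
	case ESR_ELx_EC_HVC64:
		/* Host hypercalls into the nVHE hyp code land here. */
		break;
	default:
		/* Anything else from the host is unexpected at this point. */
		break;
	}
}
```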
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 64ce29378467..4e90c2debf70 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -80,6 +80,7 @@
 			 HCR_FMO | HCR_IMO | HCR_PTW )
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
+#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
 /* TCR_EL2 Registers bits */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 6d8202d2bdfb..8f3602f320ac 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -88,6 +88,12 @@ SYM_CODE_END(__kvm_hyp_init)
  * x0: struct kvm_nvhe_init_params PA
  */
 SYM_CODE_START(___kvm_hyp_init)
+alternative_if ARM64_PROTECTED_KVM
+	mov_q	x1, HCR_HOST_NVHE_PROTECTED_FLAGS
+	msr	hcr_el2, x1
+	isb
+alternative_else_nop_endif
+
 	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
 	msr	tpidr_el2, x1
 
@@ -224,6 +230,12 @@ reset:
 	msr	sctlr_el2, x5
 	isb
 
+alternative_if ARM64_PROTECTED_KVM
+	mov_q	x5, HCR_HOST_NVHE_FLAGS
+	msr	hcr_el2, x5
+	isb
+alternative_else_nop_endif
+
 	/* Install stub vectors */
 	adr_l	x5, __hyp_stub_vectors
 	msr	vbar_el2, x5
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 8ae8160bc93a..e1f8e0797144 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -96,7 +96,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 		mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
 
 	write_sysreg(mdcr_el2, mdcr_el2);
-	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
+	if (is_protected_kvm_enabled())
+		write_sysreg(HCR_HOST_NVHE_PROTECTED_FLAGS, hcr_el2);
+	else
+		write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
 	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
 }
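One observation on the __deactivate_traps() hunk above: the protected-vs-default flag choice now exists both in C and, via the alternative_if blocks, in hyp-init.S. Purely as a sketch, and not something the patch does, the C side could centralise the selection in a helper so future callers cannot pick the wrong set. host_hcr_flags() is a hypothetical name, and the <asm/virt.h> include assumes that is where is_protected_kvm_enabled() is introduced earlier in the series.

```c
#include <asm/kvm_arm.h>	/* HCR_HOST_NVHE_*_FLAGS */
#include <asm/sysreg.h>		/* write_sysreg() */
#include <asm/virt.h>		/* is_protected_kvm_enabled(), assumed location */

/* Hypothetical helper: one place that knows which host HCR_EL2 flags apply. */
static inline u64 host_hcr_flags(void)
{
	return is_protected_kvm_enabled() ? HCR_HOST_NVHE_PROTECTED_FLAGS :
					    HCR_HOST_NVHE_FLAGS;
}
```

__deactivate_traps() would then reduce to write_sysreg(host_hcr_flags(), hcr_el2); the open-coded if/else in the patch is equivalent and mirrors the assembly paths more literally.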
While protected nVHE KVM is installed, start trapping all host SMCs.
By default, these are simply forwarded to EL3, but PSCI SMCs are
validated first.

Create a new constant, HCR_HOST_NVHE_PROTECTED_FLAGS, with the set of HCR
flags to use while the nVHE vector is installed on a kernel booted with
the protected flag enabled. Switch back to the default HCR flags when
switching back to the stub vector.

Signed-off-by: David Brazdil <dbrazdil@google.com>
---
 arch/arm64/include/asm/kvm_arm.h   |  1 +
 arch/arm64/kvm/hyp/nvhe/hyp-init.S | 12 ++++++++++++
 arch/arm64/kvm/hyp/nvhe/switch.c   |  5 ++++-
 3 files changed, 17 insertions(+), 1 deletion(-)
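To make "forwarded to EL3, but PSCI SMCs are validated first" concrete, here is a rough sketch of the EL2 handling of a trapped host SMC. It is illustrative only and not code from this series: handle_host_smc(), is_psci_call() and psci_call_allowed() are hypothetical names, the PSCI function-ID list is a subset, the direct host_ctxt->regs.regs[] accesses stand in for whatever accessors the hyp code really uses, and the generic arm_smccc_1_1_smc() helper is used here only for brevity.

```c
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <asm/kvm_host.h>

/* Subset of the standard PSCI 0.2+ function IDs. */
static bool is_psci_call(u64 func_id)
{
	switch ((u32)func_id) {
	case PSCI_0_2_FN_PSCI_VERSION:
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
	case PSCI_0_2_FN_CPU_OFF:
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
		return true;
	default:
		return false;
	}
}

/* Hypothetical hook: the real series puts its PSCI validation here. */
static bool psci_call_allowed(struct kvm_cpu_context *host_ctxt)
{
	return true;
}

/* Sketch: handle one host SMC trapped by HCR_EL2.TSC. */
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
	u64 func_id = host_ctxt->regs.regs[0];
	struct arm_smccc_res res;

	if (is_psci_call(func_id) && !psci_call_allowed(host_ctxt)) {
		/* Reject without letting the call reach EL3. */
		host_ctxt->regs.regs[0] = PSCI_RET_DENIED;
		return;
	}

	/* Default: pass the SMC through to EL3 and relay the result. */
	arm_smccc_1_1_smc(func_id,
			  host_ctxt->regs.regs[1],
			  host_ctxt->regs.regs[2],
			  host_ctxt->regs.regs[3],
			  &res);
	host_ctxt->regs.regs[0] = res.a0;
	host_ctxt->regs.regs[1] = res.a1;
	host_ctxt->regs.regs[2] = res.a2;
	host_ctxt->regs.regs[3] = res.a3;
}
```

A real implementation also has to advance ELR_EL2 past the trapped SMC before returning to the host, and to decide per-call which PSCI arguments (for example the CPU_ON entry point) need sanitising; both are elided here.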