@@ -701,6 +701,9 @@
#define ID_AA64ZFR0_SVEVER_SVE2 0x1
/* id_aa64mmfr0 */
+#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40
+#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36
+#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
#define ID_AA64MMFR0_TGRAN16_SHIFT 20
@@ -95,6 +95,35 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
break;
case SYS_ID_AA64MMFR0_EL1:
+ /* Hide unsupported S2 page sizes */
+ switch (PAGE_SIZE) {
+ case SZ_64K:
+ val &= ~FEATURE(ID_AA64MMFR0_TGRAN16_2);
+ val |= FIELD_PREP(FEATURE(ID_AA64MMFR0_TGRAN16_2), 0b0001);
+ /* Fall through */
+ case SZ_16K:
+ val &= ~FEATURE(ID_AA64MMFR0_TGRAN4_2);
+ val |= FIELD_PREP(FEATURE(ID_AA64MMFR0_TGRAN4_2), 0b0001);
+ /* Fall through */
+ case SZ_4K:
+ /* Support everything */
+ break;
+ }
+		/* Advertise supported S2 page sizes */
+ switch (PAGE_SIZE) {
+ case SZ_4K:
+ val &= ~FEATURE(ID_AA64MMFR0_TGRAN4_2);
+ val |= FIELD_PREP(FEATURE(ID_AA64MMFR0_TGRAN4_2), 0b0010);
+ /* Fall through */
+ case SZ_16K:
+ val &= ~FEATURE(ID_AA64MMFR0_TGRAN16_2);
+ val |= FIELD_PREP(FEATURE(ID_AA64MMFR0_TGRAN16_2), 0b0010);
+ /* Fall through */
+ case SZ_64K:
+ val &= ~FEATURE(ID_AA64MMFR0_TGRAN64_2);
+ val |= FIELD_PREP(FEATURE(ID_AA64MMFR0_TGRAN64_2), 0b0010);
+ break;
+ }
/* Cap PARange to 40bits */
tmp = FIELD_GET(FEATURE(ID_AA64MMFR0_PARANGE), val);
if (tmp > 0b0010) {
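For reference, the 0b0001 and 0b0010 values written into the TGRANx_2 fields above are the ID_AA64MMFR0_EL1 Stage-2 granule encodings defined by the architecture. A minimal sketch of that mapping is below; the enum and its names are illustrative only and not part of this patch (the code above writes the raw values):

	/*
	 * ID_AA64MMFR0_EL1.TGRAN{4,16,64}_2 field encodings (FEAT_GTG).
	 * Illustrative names only; the patch above open-codes the values.
	 */
	enum tgran_2_encoding {
		TGRAN_2_AS_STAGE1	= 0b0000,	/* defer to the Stage-1 TGRANx field */
		TGRAN_2_UNSUPPORTED	= 0b0001,	/* granule not supported at Stage-2 */
		TGRAN_2_SUPPORTED	= 0b0010,	/* granule supported at Stage-2 */
		/* 0b0011 additionally indicates 52-bit support for the 4K/16K fields */
	};

The "hide" switch therefore forces every granule smaller than the host's PAGE_SIZE to the "not supported" encoding, while the "advertise" switch marks the host granule and every larger one as supported.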
ARMv8.5-GTG gives the opportunity to advertise the supported Stage-2
page sizes to hypervisors, allowing them to differ from the page sizes
supported at Stage-1. As KVM cannot support guest Stage-2 page sizes
that are smaller than PAGE_SIZE (it would break the guest's isolation
guarantees), let's use this feature to let the guest know (assuming it
has been told about ARMv8.5-GTG).

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/sysreg.h |  3 +++
 arch/arm64/kvm/nested.c         | 29 +++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+)
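To illustrate the consumer side, a guest hypervisor that has been told about ARMv8.5-GTG could check these fields before picking a Stage-2 granule. The helper below is a hypothetical sketch, not part of this patch; it assumes kernel context (u64, read_sysreg_s()) and reuses the shift macros added to sysreg.h above:

	/*
	 * Hypothetical guest-side check: is a given Stage-2 granule usable?
	 * After this patch KVM always sets the TGRANx_2 fields explicitly,
	 * so the 0b0000 "same as Stage-1" encoding is not handled here.
	 */
	static inline bool s2_granule_usable(u64 mmfr0, unsigned int shift)
	{
		unsigned int field = (mmfr0 >> shift) & 0xf;

		/* 0b0010 means supported (0b0011 adds 52-bit PAs for 4K/16K) */
		return field >= 0b0010;
	}

	/* Example: decide whether a 4K Stage-2 granule can be used */
	u64 mmfr0 = read_sysreg_s(SYS_ID_AA64MMFR0_EL1);
	bool can_use_4k_s2 = s2_granule_usable(mmfr0, ID_AA64MMFR0_TGRAN4_2_SHIFT);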