Message ID | 20181031175745.18650-6-punit.agrawal@arm.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: Support PUD hugepage at stage 2 | expand |
On Wed, Oct 31, 2018 at 05:57:42PM +0000, Punit Agrawal wrote: > In preparation for creating PUD hugepages at stage 2, add support for > detecting execute permissions on PUD page table entries. Faults due to > lack of execute permissions on page table entries is used to perform > i-cache invalidation on first execute. > > Provide trivial implementations of arm32 helpers to allow sharing of > code. > > Signed-off-by: Punit Agrawal <punit.agrawal@arm.com> > Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com> > Cc: Christoffer Dall <christoffer.dall@arm.com> > Cc: Marc Zyngier <marc.zyngier@arm.com> > Cc: Russell King <linux@armlinux.org.uk> > Cc: Catalin Marinas <catalin.marinas@arm.com> > Cc: Will Deacon <will.deacon@arm.com> > --- > arch/arm/include/asm/kvm_mmu.h | 6 +++ > arch/arm64/include/asm/kvm_mmu.h | 5 +++ > arch/arm64/include/asm/pgtable-hwdef.h | 2 + > virt/kvm/arm/mmu.c | 53 +++++++++++++++++++++++--- > 4 files changed, 61 insertions(+), 5 deletions(-) > > diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h > index 37bf85d39607..839a619873d3 100644 > --- a/arch/arm/include/asm/kvm_mmu.h > +++ b/arch/arm/include/asm/kvm_mmu.h > @@ -102,6 +102,12 @@ static inline bool kvm_s2pud_readonly(pud_t *pud) > return false; > } > > +static inline bool kvm_s2pud_exec(pud_t *pud) > +{ > + BUG(); nit: I think this should be WARN() now :) > + return false; > +} > + > static inline pte_t kvm_s2pte_mkwrite(pte_t pte) > { > pte_val(pte) |= L_PTE_S2_RDWR; > diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h > index 8da6d1b2a196..c755b37b3f92 100644 > --- a/arch/arm64/include/asm/kvm_mmu.h > +++ b/arch/arm64/include/asm/kvm_mmu.h > @@ -261,6 +261,11 @@ static inline bool kvm_s2pud_readonly(pud_t *pudp) > return kvm_s2pte_readonly((pte_t *)pudp); > } > > +static inline bool kvm_s2pud_exec(pud_t *pudp) > +{ > + return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN); > +} > + > #define hyp_pte_table_empty(ptep) 
kvm_page_empty(ptep) > > #ifdef __PAGETABLE_PMD_FOLDED > diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h > index 1d7d8da2ef9b..336e24cddc87 100644 > --- a/arch/arm64/include/asm/pgtable-hwdef.h > +++ b/arch/arm64/include/asm/pgtable-hwdef.h > @@ -193,6 +193,8 @@ > #define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ > #define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */ > > +#define PUD_S2_XN (_AT(pudval_t, 2) << 53) /* XN[1:0] */ > + > /* > * Memory Attribute override for Stage-2 (MemAttr[3:0]) > */ > diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c > index 1c669c3c1208..8e44dccd1b47 100644 > --- a/virt/kvm/arm/mmu.c > +++ b/virt/kvm/arm/mmu.c > @@ -1083,23 +1083,66 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache > return 0; > } > > -static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) > +/* > + * stage2_get_leaf_entry - walk the stage2 VM page tables and return > + * true if a valid and present leaf-entry is found. A pointer to the > + * leaf-entry is returned in the appropriate level variable - pudpp, > + * pmdpp, ptepp. > + */ > +static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, > + pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) Do we need this type madness or could this just return a u64 pointer (NULL if nothing is found) and pass that to kvm_s2pte_exec (because we know it's the same bit we need to check regardless of the pgtable level on both arm and arm64)? Or do we consider that bad for some reason? 
Thanks, Christoffer > { > + pud_t *pudp; > pmd_t *pmdp; > pte_t *ptep; > > - pmdp = stage2_get_pmd(kvm, NULL, addr); > + *pudpp = NULL; > + *pmdpp = NULL; > + *ptepp = NULL; > + > + pudp = stage2_get_pud(kvm, NULL, addr); > + if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp)) > + return false; > + > + if (stage2_pud_huge(kvm, *pudp)) { > + *pudpp = pudp; > + return true; > + } > + > + pmdp = stage2_pmd_offset(kvm, pudp, addr); > if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp)) > return false; > > - if (pmd_thp_or_huge(*pmdp)) > - return kvm_s2pmd_exec(pmdp); > + if (pmd_thp_or_huge(*pmdp)) { > + *pmdpp = pmdp; > + return true; > + } > > ptep = pte_offset_kernel(pmdp, addr); > if (!ptep || pte_none(*ptep) || !pte_present(*ptep)) > return false; > > - return kvm_s2pte_exec(ptep); > + *ptepp = ptep; > + return true; > +} > + > +static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) > +{ > + pud_t *pudp; > + pmd_t *pmdp; > + pte_t *ptep; > + bool found; > + > + found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep); > + if (!found) > + return false; > + > + if (pudp) > + return kvm_s2pud_exec(pudp); > + else if (pmdp) > + return kvm_s2pmd_exec(pmdp); > + else > + return kvm_s2pte_exec(ptep); > } > > static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, > -- > 2.19.1 >
On 10/31/2018 11:27 PM, Punit Agrawal wrote: > In preparation for creating PUD hugepages at stage 2, add support for > detecting execute permissions on PUD page table entries. Faults due to > lack of execute permissions on page table entries is used to perform > i-cache invalidation on first execute. > > Provide trivial implementations of arm32 helpers to allow sharing of > code. > > Signed-off-by: Punit Agrawal <punit.agrawal@arm.com> > Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com> > Cc: Christoffer Dall <christoffer.dall@arm.com> > Cc: Marc Zyngier <marc.zyngier@arm.com> > Cc: Russell King <linux@armlinux.org.uk> > Cc: Catalin Marinas <catalin.marinas@arm.com> > Cc: Will Deacon <will.deacon@arm.com> > --- > arch/arm/include/asm/kvm_mmu.h | 6 +++ > arch/arm64/include/asm/kvm_mmu.h | 5 +++ > arch/arm64/include/asm/pgtable-hwdef.h | 2 + > virt/kvm/arm/mmu.c | 53 +++++++++++++++++++++++--- > 4 files changed, 61 insertions(+), 5 deletions(-) > > diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h > index 37bf85d39607..839a619873d3 100644 > --- a/arch/arm/include/asm/kvm_mmu.h > +++ b/arch/arm/include/asm/kvm_mmu.h > @@ -102,6 +102,12 @@ static inline bool kvm_s2pud_readonly(pud_t *pud) > return false; > } > > +static inline bool kvm_s2pud_exec(pud_t *pud) > +{ > + BUG(); > + return false; > +} > + > static inline pte_t kvm_s2pte_mkwrite(pte_t pte) > { > pte_val(pte) |= L_PTE_S2_RDWR; > diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h > index 8da6d1b2a196..c755b37b3f92 100644 > --- a/arch/arm64/include/asm/kvm_mmu.h > +++ b/arch/arm64/include/asm/kvm_mmu.h > @@ -261,6 +261,11 @@ static inline bool kvm_s2pud_readonly(pud_t *pudp) > return kvm_s2pte_readonly((pte_t *)pudp); > } > > +static inline bool kvm_s2pud_exec(pud_t *pudp) > +{ > + return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN); > +} > + > #define hyp_pte_table_empty(ptep) kvm_page_empty(ptep) > > #ifdef __PAGETABLE_PMD_FOLDED > diff --git 
a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h > index 1d7d8da2ef9b..336e24cddc87 100644 > --- a/arch/arm64/include/asm/pgtable-hwdef.h > +++ b/arch/arm64/include/asm/pgtable-hwdef.h > @@ -193,6 +193,8 @@ > #define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ > #define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */ > > +#define PUD_S2_XN (_AT(pudval_t, 2) << 53) /* XN[1:0] */ > + > /* > * Memory Attribute override for Stage-2 (MemAttr[3:0]) > */ > diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c > index 1c669c3c1208..8e44dccd1b47 100644 > --- a/virt/kvm/arm/mmu.c > +++ b/virt/kvm/arm/mmu.c > @@ -1083,23 +1083,66 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache > return 0; > } > > -static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) > +/* > + * stage2_get_leaf_entry - walk the stage2 VM page tables and return > + * true if a valid and present leaf-entry is found. A pointer to the > + * leaf-entry is returned in the appropriate level variable - pudpp, > + * pmdpp, ptepp. 
> + */ > +static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, > + pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) > { > + pud_t *pudp; > pmd_t *pmdp; > pte_t *ptep; > > - pmdp = stage2_get_pmd(kvm, NULL, addr); > + *pudpp = NULL; > + *pmdpp = NULL; > + *ptepp = NULL; > + > + pudp = stage2_get_pud(kvm, NULL, addr); > + if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp)) > + return false; > + > + if (stage2_pud_huge(kvm, *pudp)) { > + *pudpp = pudp; > + return true; > + } > + > + pmdp = stage2_pmd_offset(kvm, pudp, addr); > if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp)) > return false; > > - if (pmd_thp_or_huge(*pmdp)) > - return kvm_s2pmd_exec(pmdp); > + if (pmd_thp_or_huge(*pmdp)) { > + *pmdpp = pmdp; > + return true; > + } > > ptep = pte_offset_kernel(pmdp, addr); > if (!ptep || pte_none(*ptep) || !pte_present(*ptep)) > return false; > > - return kvm_s2pte_exec(ptep); > + *ptepp = ptep; > + return true; > +} > + > +static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) > +{ > + pud_t *pudp; > + pmd_t *pmdp; > + pte_t *ptep; > + bool found; > + > + found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep); > + if (!found) > + return false; > + > + if (pudp) > + return kvm_s2pud_exec(pudp); > + else if (pmdp) > + return kvm_s2pmd_exec(pmdp); > + else > + return kvm_s2pte_exec(ptep); > } stage2_get_leaf_entry() is not really necessary as a separate function. It determines leaf entry and just return a true/false. At those 'true' return points it can just return kvm_s2XXX_exec() directly. Passing three different pointers as arguments and checking for them being non-NULL upon return and doing a simple return then seems like a lot without much reason. stage2_is_exec() can just be expanded to add PUD support.
On 01/11/2018 13:38, Christoffer Dall wrote: > On Wed, Oct 31, 2018 at 05:57:42PM +0000, Punit Agrawal wrote: >> In preparation for creating PUD hugepages at stage 2, add support for >> detecting execute permissions on PUD page table entries. Faults due to >> lack of execute permissions on page table entries is used to perform >> i-cache invalidation on first execute. >> >> Provide trivial implementations of arm32 helpers to allow sharing of >> code. >> >> Signed-off-by: Punit Agrawal <punit.agrawal@arm.com> >> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com> >> Cc: Christoffer Dall <christoffer.dall@arm.com> >> Cc: Marc Zyngier <marc.zyngier@arm.com> >> Cc: Russell King <linux@armlinux.org.uk> >> Cc: Catalin Marinas <catalin.marinas@arm.com> >> Cc: Will Deacon <will.deacon@arm.com> >> --- >> arch/arm/include/asm/kvm_mmu.h | 6 +++ >> arch/arm64/include/asm/kvm_mmu.h | 5 +++ >> arch/arm64/include/asm/pgtable-hwdef.h | 2 + >> virt/kvm/arm/mmu.c | 53 +++++++++++++++++++++++--- >> 4 files changed, 61 insertions(+), 5 deletions(-) >> >> diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h >> index 37bf85d39607..839a619873d3 100644 >> --- a/arch/arm/include/asm/kvm_mmu.h >> +++ b/arch/arm/include/asm/kvm_mmu.h >> @@ -102,6 +102,12 @@ static inline bool kvm_s2pud_readonly(pud_t *pud) >> return false; >> } >> >> +static inline bool kvm_s2pud_exec(pud_t *pud) >> +{ >> + BUG(); > > nit: I think this should be WARN() now :) > >> + return false; >> +} >> + >> static inline pte_t kvm_s2pte_mkwrite(pte_t pte) >> { >> pte_val(pte) |= L_PTE_S2_RDWR; >> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h >> index 8da6d1b2a196..c755b37b3f92 100644 >> --- a/arch/arm64/include/asm/kvm_mmu.h >> +++ b/arch/arm64/include/asm/kvm_mmu.h >> @@ -261,6 +261,11 @@ static inline bool kvm_s2pud_readonly(pud_t *pudp) >> return kvm_s2pte_readonly((pte_t *)pudp); >> } >> >> +static inline bool kvm_s2pud_exec(pud_t *pudp) >> +{ >> + return 
!(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN); >> +} >> + >> #define hyp_pte_table_empty(ptep) kvm_page_empty(ptep) >> >> #ifdef __PAGETABLE_PMD_FOLDED >> diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h >> index 1d7d8da2ef9b..336e24cddc87 100644 >> --- a/arch/arm64/include/asm/pgtable-hwdef.h >> +++ b/arch/arm64/include/asm/pgtable-hwdef.h >> @@ -193,6 +193,8 @@ >> #define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ >> #define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */ >> >> +#define PUD_S2_XN (_AT(pudval_t, 2) << 53) /* XN[1:0] */ >> + >> /* >> * Memory Attribute override for Stage-2 (MemAttr[3:0]) >> */ >> diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c >> index 1c669c3c1208..8e44dccd1b47 100644 >> --- a/virt/kvm/arm/mmu.c >> +++ b/virt/kvm/arm/mmu.c >> @@ -1083,23 +1083,66 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache >> return 0; >> } >> >> -static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) >> +/* >> + * stage2_get_leaf_entry - walk the stage2 VM page tables and return >> + * true if a valid and present leaf-entry is found. A pointer to the >> + * leaf-entry is returned in the appropriate level variable - pudpp, >> + * pmdpp, ptepp. >> + */ >> +static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, >> + pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) > > Do we need this type madness or could this just return a u64 pointer > (NULL if nothing is found) and pass that to kvm_s2pte_exec (because we > know it's the same bit we need to check regardless of the pgtable level > on both arm and arm64)? > > Or do we consider that bad for some reason? Practically, yes the bit positions are same and thus we should be able to do this assuming that it is just a pte. When we get to independent stage2 pgtable implementation which treats all page table entries as a single type with a level information, we should be able to get rid of these. 
But since we have followed the Linux way of page-table manipulation, we have "level"-specific accessors; the other option is to open-code the walking sequence from the pgd to the leaf entry everywhere. I am fine with changing this code, if you like. Cheers Suzuki
On Wed, Dec 05, 2018 at 05:57:51PM +0000, Suzuki K Poulose wrote: > > > On 01/11/2018 13:38, Christoffer Dall wrote: > >On Wed, Oct 31, 2018 at 05:57:42PM +0000, Punit Agrawal wrote: > >>In preparation for creating PUD hugepages at stage 2, add support for > >>detecting execute permissions on PUD page table entries. Faults due to > >>lack of execute permissions on page table entries is used to perform > >>i-cache invalidation on first execute. > >> > >>Provide trivial implementations of arm32 helpers to allow sharing of > >>code. > >> > >>Signed-off-by: Punit Agrawal <punit.agrawal@arm.com> > >>Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com> > >>Cc: Christoffer Dall <christoffer.dall@arm.com> > >>Cc: Marc Zyngier <marc.zyngier@arm.com> > >>Cc: Russell King <linux@armlinux.org.uk> > >>Cc: Catalin Marinas <catalin.marinas@arm.com> > >>Cc: Will Deacon <will.deacon@arm.com> > >>--- > >> arch/arm/include/asm/kvm_mmu.h | 6 +++ > >> arch/arm64/include/asm/kvm_mmu.h | 5 +++ > >> arch/arm64/include/asm/pgtable-hwdef.h | 2 + > >> virt/kvm/arm/mmu.c | 53 +++++++++++++++++++++++--- > >> 4 files changed, 61 insertions(+), 5 deletions(-) > >> > >>diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h > >>index 37bf85d39607..839a619873d3 100644 > >>--- a/arch/arm/include/asm/kvm_mmu.h > >>+++ b/arch/arm/include/asm/kvm_mmu.h > >>@@ -102,6 +102,12 @@ static inline bool kvm_s2pud_readonly(pud_t *pud) > >> return false; > >> } > >>+static inline bool kvm_s2pud_exec(pud_t *pud) > >>+{ > >>+ BUG(); > > > >nit: I think this should be WARN() now :) > > > >>+ return false; > >>+} > >>+ > >> static inline pte_t kvm_s2pte_mkwrite(pte_t pte) > >> { > >> pte_val(pte) |= L_PTE_S2_RDWR; > >>diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h > >>index 8da6d1b2a196..c755b37b3f92 100644 > >>--- a/arch/arm64/include/asm/kvm_mmu.h > >>+++ b/arch/arm64/include/asm/kvm_mmu.h > >>@@ -261,6 +261,11 @@ static inline bool kvm_s2pud_readonly(pud_t 
*pudp) > >> return kvm_s2pte_readonly((pte_t *)pudp); > >> } > >>+static inline bool kvm_s2pud_exec(pud_t *pudp) > >>+{ > >>+ return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN); > >>+} > >>+ > >> #define hyp_pte_table_empty(ptep) kvm_page_empty(ptep) > >> #ifdef __PAGETABLE_PMD_FOLDED > >>diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h > >>index 1d7d8da2ef9b..336e24cddc87 100644 > >>--- a/arch/arm64/include/asm/pgtable-hwdef.h > >>+++ b/arch/arm64/include/asm/pgtable-hwdef.h > >>@@ -193,6 +193,8 @@ > >> #define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ > >> #define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */ > >>+#define PUD_S2_XN (_AT(pudval_t, 2) << 53) /* XN[1:0] */ > >>+ > >> /* > >> * Memory Attribute override for Stage-2 (MemAttr[3:0]) > >> */ > >>diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c > >>index 1c669c3c1208..8e44dccd1b47 100644 > >>--- a/virt/kvm/arm/mmu.c > >>+++ b/virt/kvm/arm/mmu.c > >>@@ -1083,23 +1083,66 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache > >> return 0; > >> } > >>-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) > >>+/* > >>+ * stage2_get_leaf_entry - walk the stage2 VM page tables and return > >>+ * true if a valid and present leaf-entry is found. A pointer to the > >>+ * leaf-entry is returned in the appropriate level variable - pudpp, > >>+ * pmdpp, ptepp. > >>+ */ > >>+static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, > >>+ pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) > > > >Do we need this type madness or could this just return a u64 pointer > >(NULL if nothing is found) and pass that to kvm_s2pte_exec (because we > >know it's the same bit we need to check regardless of the pgtable level > >on both arm and arm64)? > > > >Or do we consider that bad for some reason? > > Practically, yes the bit positions are same and thus we should be able > to do this assuming that it is just a pte. 
When we get to independent stage2 > pgtable implementation which treats all page table entries as a single type > with a level information, we should be able to get rid of these. > But since we have followed the Linux way of page-table manipulation where we > have "level" specific accessors. The other option is open code the walking > sequence from the pgd to the leaf entry everywhere. > > I am fine with changing this code, if you like. > Meh, it just looked a bit over-engineered to me when I originally looked at the patches, but you're right, they align with the rest of the implementation. Thanks, Christoffer
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 37bf85d39607..839a619873d3 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -102,6 +102,12 @@ static inline bool kvm_s2pud_readonly(pud_t *pud) return false; } +static inline bool kvm_s2pud_exec(pud_t *pud) +{ + BUG(); + return false; +} + static inline pte_t kvm_s2pte_mkwrite(pte_t pte) { pte_val(pte) |= L_PTE_S2_RDWR; diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 8da6d1b2a196..c755b37b3f92 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -261,6 +261,11 @@ static inline bool kvm_s2pud_readonly(pud_t *pudp) return kvm_s2pte_readonly((pte_t *)pudp); } +static inline bool kvm_s2pud_exec(pud_t *pudp) +{ + return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN); +} + #define hyp_pte_table_empty(ptep) kvm_page_empty(ptep) #ifdef __PAGETABLE_PMD_FOLDED diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 1d7d8da2ef9b..336e24cddc87 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -193,6 +193,8 @@ #define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ #define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */ +#define PUD_S2_XN (_AT(pudval_t, 2) << 53) /* XN[1:0] */ + /* * Memory Attribute override for Stage-2 (MemAttr[3:0]) */ diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 1c669c3c1208..8e44dccd1b47 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1083,23 +1083,66 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache return 0; } -static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) +/* + * stage2_get_leaf_entry - walk the stage2 VM page tables and return + * true if a valid and present leaf-entry is found. A pointer to the + * leaf-entry is returned in the appropriate level variable - pudpp, + * pmdpp, ptepp. 
+ */ +static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, + pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) { + pud_t *pudp; pmd_t *pmdp; pte_t *ptep; - pmdp = stage2_get_pmd(kvm, NULL, addr); + *pudpp = NULL; + *pmdpp = NULL; + *ptepp = NULL; + + pudp = stage2_get_pud(kvm, NULL, addr); + if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp)) + return false; + + if (stage2_pud_huge(kvm, *pudp)) { + *pudpp = pudp; + return true; + } + + pmdp = stage2_pmd_offset(kvm, pudp, addr); if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp)) return false; - if (pmd_thp_or_huge(*pmdp)) - return kvm_s2pmd_exec(pmdp); + if (pmd_thp_or_huge(*pmdp)) { + *pmdpp = pmdp; + return true; + } ptep = pte_offset_kernel(pmdp, addr); if (!ptep || pte_none(*ptep) || !pte_present(*ptep)) return false; - return kvm_s2pte_exec(ptep); + *ptepp = ptep; + return true; +} + +static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) +{ + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + bool found; + + found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep); + if (!found) + return false; + + if (pudp) + return kvm_s2pud_exec(pudp); + else if (pmdp) + return kvm_s2pmd_exec(pmdp); + else + return kvm_s2pte_exec(ptep); } static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,