@@ -129,6 +129,7 @@ static inline bool pte_dirty(pte_t pte)
return pte_flags(pte) & _PAGE_DIRTY_BITS;
}
+#define pte_shstk pte_shstk
static inline bool pte_shstk(pte_t pte)
{
if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
@@ -147,6 +148,7 @@ static inline bool pmd_dirty(pmd_t pmd)
return pmd_flags(pmd) & _PAGE_DIRTY_BITS;
}
+#define pmd_shstk pmd_shstk
static inline bool pmd_shstk(pmd_t pmd)
{
if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
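The "#define pte_shstk pte_shstk" and "#define pmd_shstk pmd_shstk" lines use the usual self-define idiom: making the name visible to the preprocessor signals that this architecture supplies a real implementation, so the generic header below can guard its always-false fallbacks with #ifndef. A minimal userspace sketch of the same idiom follows; the header layout and the arch_has_widget name are made up for illustration, not taken from the kernel.

/*
 * Sketch of the self-define override idiom: the "arch" side provides a
 * real helper and advertises it with a macro that expands to itself; the
 * "generic" side only installs a stub when no override was advertised.
 */
#include <stdbool.h>
#include <stdio.h>

/* "arch" side: real implementation plus the self-define. */
static inline bool arch_has_widget(int flags)
{
	return flags & 0x4;
}
#define arch_has_widget arch_has_widget

/* "generic" side: fallback stub, compiled only without an override. */
#ifndef arch_has_widget
static inline bool arch_has_widget(int flags)
{
	return false;
}
#endif

int main(void)
{
	/* Prints 1: the override is picked because the macro is defined. */
	printf("%d\n", arch_has_widget(0x4));
	return 0;
}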
@@ -500,6 +500,20 @@ static inline pte_t pte_mkwrite_shstk(pte_t pte)
}
#endif
+#ifndef pte_shstk
+static inline bool pte_shstk(pte_t pte)
+{
+ return false;
+}
+#endif
+
+#ifndef pmd_shstk
+static inline bool pmd_shstk(pmd_t pmd)
+{
+ return false;
+}
+#endif
+
#ifndef pte_clear_savedwrite
#define pte_clear_savedwrite pte_wrprotect
#endif
@@ -1656,6 +1656,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
*/
orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
tlb->fullmm);
+ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
+ pmd_shstk(orig_pmd));
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
if (vma_is_special_huge(vma)) {
if (arch_needs_pgtable_deposit())
@@ -1437,6 +1437,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
continue;
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
+ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
+ pte_shstk(ptent));
tlb_remove_tlb_entry(tlb, pte, addr);
zap_install_uffd_wp_if_needed(vma, addr, pte, details,
ptent);
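The two VM_WARN_ON_ONCE() calls inspect the entry value that was just cleared: a shadow stack PTE or PMD must only ever appear inside a VMA with VM_SHADOW_STACK set, and the warning fires once (with a backtrace) if that invariant is violated, without otherwise changing the zap path. On x86 the encoding that pte_shstk()/pmd_shstk() test for is Dirty=1 with Write=0, gated on X86_FEATURE_USER_SHSTK as in the hunks above. Below is a standalone sketch of that flags test; the MY_PAGE_* definitions and flags_are_shstk() are illustrative stand-ins, not the kernel's.

/*
 * Userspace sketch of the shadow stack predicate: an entry is treated as
 * shadow stack when the Dirty bit is set while the Write bit is clear.
 * Bit positions follow the x86 PTE layout (R/W = bit 1, Dirty = bit 6).
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MY_PAGE_RW    (1ULL << 1)
#define MY_PAGE_DIRTY (1ULL << 6)

static bool flags_are_shstk(uint64_t pte_flags)
{
	return (pte_flags & (MY_PAGE_RW | MY_PAGE_DIRTY)) == MY_PAGE_DIRTY;
}

int main(void)
{
	assert(flags_are_shstk(MY_PAGE_DIRTY));                /* shadow stack entry */
	assert(!flags_are_shstk(MY_PAGE_RW | MY_PAGE_DIRTY));  /* ordinary writable page */
	assert(!flags_are_shstk(0));                           /* clean, not writable */
	return 0;
}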