@@ -1692,6 +1692,9 @@ static inline bool arch_faults_on_old_pte(void)
 #define maybe_mkwrite maybe_mkwrite
 extern pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma);
 
+#define is_shadow_stack_mapping is_shadow_stack_mapping
+extern bool is_shadow_stack_mapping(vm_flags_t vm_flags);
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PGTABLE_H */
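This first hunk is the x86 header; the _ASM_X86_PGTABLE_H guard identifies it as arch/x86/include/asm/pgtable.h. The "#define is_shadow_stack_mapping is_shadow_stack_mapping" line is the kernel's usual idiom for advertising an arch override of a generic helper: the macro expands to the function's own name, so call sites are unchanged, but the symbol becomes visible to the preprocessor, which is what the #ifndef fallback in the generic header (third hunk below) keys off. A minimal sketch of the pattern, with hypothetical names:

	/* arch header: provide the override and announce it to the
	 * preprocessor (hypothetical names, for illustration only) */
	#define my_helper my_helper
	static inline int my_helper(void) { return 1; }

	/* generic header, included after the arch one: the #ifndef sees
	 * the macro, so the fallback is never compiled on this arch */
	#ifndef my_helper
	static inline int my_helper(void) { return 0; }
	#endif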
@@ -897,3 +897,8 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+bool is_shadow_stack_mapping(vm_flags_t vm_flags)
+{
+	return vm_flags & VM_SHADOW_STACK;
+}
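The x86 definition lives out of line in arch/x86/mm/pgtable.c (the pmd_free_pte_page() context above) rather than as a static inline in the header, presumably because asm/pgtable.h is processed before the VM_* flags from linux/mm.h are in scope, while the .c file can include them normally. VM_SHADOW_STACK itself is one of the scarce VM_HIGH_ARCH_* VMA flag bits; the exact bit and config symbol have shifted across revisions of the series, but the merged kernel's shape is roughly:

	/* include/linux/mm.h (sketch; the merged kernel uses
	 * VM_HIGH_ARCH_5 under CONFIG_X86_USER_SHADOW_STACK, earlier
	 * revisions of the series used a different config symbol) */
	#ifdef CONFIG_X86_USER_SHADOW_STACK
	# define VM_SHADOW_STACK	VM_HIGH_ARCH_5
	#else
	# define VM_SHADOW_STACK	VM_NONE
	#endif

The VM_NONE fallback means vm_flags & VM_SHADOW_STACK is always zero on other architectures, consistent with the constant-false generic helper in the next hunk.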
@@ -1446,6 +1446,13 @@ static inline bool arch_has_pfn_modify_check(void)
 }
 #endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
 
+#ifndef is_shadow_stack_mapping
+static inline bool is_shadow_stack_mapping(vm_flags_t vm_flags)
+{
+	return false;
+}
+#endif
+
 /*
  * Architecture PAGE_KERNEL_* fallbacks
  *
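This is the generic side of the override idiom, in include/linux/pgtable.h. It works because that header pulls in asm/pgtable.h near its top, so by the time the #ifndef is evaluated, x86 has already defined the macro and the fallback is skipped; every other architecture gets a static inline returning a constant, which the compiler folds away at each call site. In outline (not verbatim):

	/* include/linux/pgtable.h: include ordering behind the idiom */
	#include <asm/pgtable.h>	/* x86: defines is_shadow_stack_mapping */

	/* ... generic page table helpers ... */

	#ifndef is_shadow_stack_mapping	/* skipped on x86, compiled elsewhere */
	static inline bool is_shadow_stack_mapping(vm_flags_t vm_flags)
	{
		return false;
	}
	#endif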
@@ -1718,6 +1718,9 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
 	if (file && is_file_hugepages(file))
 		return 0;
 
+	if (is_shadow_stack_mapping(vm_flags))
+		return 1;
+
 	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
 }
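accountable_mapping() (mm/mmap.c) decides whether a mapping is charged against Committed_AS. A shadow stack is private anonymous memory the kernel must be able to populate on fault, so it needs the charge even when its vm_flags would fail the final writable-private test; in the revisions where the shadow stack VMA is created with PROT_READ and VM_WRITE clear, that test evaluates to false. A standalone arithmetic check (userspace demo using the kernel's flag values, except the placeholder shadow stack bit):

	/* demo: a VM_WRITE-less shadow stack VMA escapes the old test */
	#include <stdbool.h>
	#include <stdio.h>

	#define VM_READ		0x00000001UL
	#define VM_WRITE	0x00000002UL
	#define VM_SHARED	0x00000008UL
	#define VM_NORESERVE	0x00200000UL
	#define VM_SHADOW_STACK	0x04000000UL	/* placeholder bit for the demo */

	int main(void)
	{
		unsigned long vm_flags = VM_READ | VM_SHADOW_STACK;

		/* the pre-existing accountable_mapping() expression */
		bool accounted = (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE))
				 == VM_WRITE;
		printf("accounted without the new branch: %d\n", accounted); /* 0 */
		return 0;
	}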
@@ -3387,6 +3390,8 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
 		mm->stack_vm += npages;
+	else if (is_shadow_stack_mapping(flags))
+		mm->stack_vm += npages;
 	else if (is_data_mapping(flags))
 		mm->data_vm += npages;
 }
 
 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
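vm_stat_account() (also mm/mmap.c) classifies pages into the counters behind /proc/<pid>/status: mm->stack_vm feeds VmStk and mm->data_vm feeds VmData. Shadow stack pages are counted as stack, and the branch has to sit before the is_data_mapping() check: in the revisions where the shadow stack VMA also carries VM_WRITE (as it does in the merged kernel), is_data_mapping() would match first and the shadow stack branch would be dead code. A companion check to the demo above:

	/* demo: a VM_WRITE shadow stack VMA satisfies is_data_mapping()'s
	 * test (from mm/internal.h), so branch order matters */
	#include <stdbool.h>
	#include <stdio.h>

	#define VM_WRITE	0x00000002UL
	#define VM_SHARED	0x00000008UL
	#define VM_GROWSDOWN	0x00000100UL
	#define VM_STACK	VM_GROWSDOWN	/* stack grows down on x86 */
	#define VM_SHADOW_STACK	0x04000000UL	/* placeholder bit for the demo */

	int main(void)
	{
		unsigned long flags = VM_WRITE | VM_SHADOW_STACK;
		bool data = (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;

		printf("would be counted as data first: %d\n", data); /* 1 */
		return 0;
	}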