@@ -22,6 +22,12 @@ static bool __page_table_check_enabled __initdata =
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);
+#define PAGE_TABLE_CHECK_WARN(v) \
+ do { \
+ if (WARN_ON_ONCE(v)) \
+ static_branch_enable(&page_table_check_disabled); \
+ } while (false)
+
static int __init early_page_table_check_param(char *buf)
{
return kstrtobool(buf, &__page_table_check_enabled);
@@ -50,7 +56,8 @@ struct page_ext_operations page_table_check_ops = {
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
- BUG_ON(!page_ext);
+ PAGE_TABLE_CHECK_WARN(!page_ext);
+
return (void *)(page_ext) + page_table_check_ops.offset;
}
@@ -72,18 +79,18 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
page = pfn_to_page(pfn);
page_ext = page_ext_get(page);
- BUG_ON(PageSlab(page));
+ PAGE_TABLE_CHECK_WARN(PageSlab(page));
anon = PageAnon(page);
for (i = 0; i < pgcnt; i++) {
struct page_table_check *ptc = get_page_table_check(page_ext);
if (anon) {
- BUG_ON(atomic_read(&ptc->file_map_count));
- BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
+ PAGE_TABLE_CHECK_WARN(atomic_read(&ptc->file_map_count));
+ PAGE_TABLE_CHECK_WARN(atomic_dec_return(&ptc->anon_map_count) < 0);
} else {
- BUG_ON(atomic_read(&ptc->anon_map_count));
- BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
+ PAGE_TABLE_CHECK_WARN(atomic_read(&ptc->anon_map_count));
+ PAGE_TABLE_CHECK_WARN(atomic_dec_return(&ptc->file_map_count) < 0);
}
page_ext = page_ext_next(page_ext);
}
@@ -110,18 +117,18 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
page = pfn_to_page(pfn);
page_ext = page_ext_get(page);
- BUG_ON(PageSlab(page));
+ PAGE_TABLE_CHECK_WARN(PageSlab(page));
anon = PageAnon(page);
for (i = 0; i < pgcnt; i++) {
struct page_table_check *ptc = get_page_table_check(page_ext);
if (anon) {
- BUG_ON(atomic_read(&ptc->file_map_count));
- BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
+ PAGE_TABLE_CHECK_WARN(atomic_read(&ptc->file_map_count));
+ PAGE_TABLE_CHECK_WARN(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
} else {
- BUG_ON(atomic_read(&ptc->anon_map_count));
- BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
+ PAGE_TABLE_CHECK_WARN(atomic_read(&ptc->anon_map_count));
+ PAGE_TABLE_CHECK_WARN(atomic_inc_return(&ptc->file_map_count) < 0);
}
page_ext = page_ext_next(page_ext);
}
@@ -137,15 +144,15 @@ void __page_table_check_zero(struct page *page, unsigned int order)
struct page_ext *page_ext;
unsigned long i;
- BUG_ON(PageSlab(page));
+ PAGE_TABLE_CHECK_WARN(PageSlab(page));
page_ext = page_ext_get(page);
- BUG_ON(!page_ext);
+ PAGE_TABLE_CHECK_WARN(!page_ext);
for (i = 0; i < (1ul << order); i++) {
struct page_table_check *ptc = get_page_table_check(page_ext);
- BUG_ON(atomic_read(&ptc->anon_map_count));
- BUG_ON(atomic_read(&ptc->file_map_count));
+ PAGE_TABLE_CHECK_WARN(atomic_read(&ptc->anon_map_count));
+ PAGE_TABLE_CHECK_WARN(atomic_read(&ptc->file_map_count));
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
Currently, when page_table_check detects an error it panics the kernel. Print a warning instead, as that is more useful than unconditionally crashing the machine. However, once a warning has been issued the page_table_check counters are unbalanced, so disable further checking until the next boot. Where machine hardening requires a more secure environment, it is still possible to crash the machine on page_table_check errors via the panic_on_warn sysctl option.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
---
 mm/page_table_check.c | 37 ++++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)
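
For context (not part of this patch): the inline wrappers in include/linux/page_table_check.h already gate every check on the page_table_check_disabled static key, which is why flipping the key in PAGE_TABLE_CHECK_WARN() turns all subsequent checks into no-ops until the next boot, so the now-unbalanced counters are never consulted again. Below is a minimal sketch of one such wrapper, abridged from the header of this era; exact signatures differ between kernel versions.

	static inline void page_table_check_pte_set(struct mm_struct *mm,
						    unsigned long addr, pte_t *ptep,
						    pte_t pte)
	{
		/*
		 * page_table_check_disabled defaults to true (checks off);
		 * init flips it off again only when the feature is enabled
		 * (CONFIG_PAGE_TABLE_CHECK_ENFORCED or page_table_check=on).
		 * PAGE_TABLE_CHECK_WARN() re-enables the key on the first
		 * failure, so this early return makes every later check a
		 * no-op until reboot.
		 */
		if (static_branch_likely(&page_table_check_disabled))
			return;

		__page_table_check_pte_set(mm, addr, ptep, pte);
	}

Because the failure path only flips a static key, disabling the checks adds no cost to the fast path and needs no extra state; an administrator who prefers the old behaviour can still get a crash by setting panic_on_warn.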