@@ -166,6 +166,7 @@ enum zone_stat_item {
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
NR_FREE_CMA_PAGES,
+ NR_BAD_PAGES, /* pages rejected at free time and intentionally leaked */
NR_VM_ZONE_STAT_ITEMS };
enum node_stat_item {
@@ -1408,7 +1408,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
__memcg_kmem_uncharge_page(page, order);
reset_page_owner(page, order);
page_table_check_free(page, order);
- return false;
+ goto err;
}
/*
@@ -1442,7 +1442,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
if (check_free)
bad += check_free_page(page);
if (bad)
- return false;
+ goto err;
page_cpupid_reset_last(page);
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
@@ -1486,6 +1486,11 @@ static __always_inline bool free_pages_prepare(struct page *page,
debug_pagealloc_unmap_pages(page, 1 << order);
return true;
+err:
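+ /* Page is not returned to the allocator: account it and keep kmemleak aware of the leak. */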
+ mod_zone_page_state(page_zone(page), NR_BAD_PAGES, 1 << order);
+ kmemleak_alloc(page_address(page), PAGE_SIZE << order, 1, GFP_ATOMIC);
+ return false;
}
#ifdef CONFIG_DEBUG_VM
@@ -1587,8 +1592,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
count -= nr_pages;
pcp->count -= nr_pages;
- if (bulkfree_pcp_prepare(page))
+ if (bulkfree_pcp_prepare(page)) {
+ __mod_zone_page_state(zone, NR_BAD_PAGES, nr_pages);
continue;
+ }
/* MIGRATE_ISOLATE page should not go to pcplists */
VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
@@ -1193,6 +1193,7 @@ int fragmentation_index(struct zone *zone, unsigned int order)
"nr_zspages",
#endif
"nr_free_cma",
+ "nr_bad_pages",
/* enum numa_stat_item counters */
#ifdef CONFIG_NUMA
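
With the zone_stat_item and the matching vmstat_text entry above, the counter is exported through /proc/vmstat (and per zone in /proc/zoneinfo). A minimal userspace sketch for reading it, assuming a kernel carrying this patch and the field named nr_bad_pages as above:

#include <stdio.h>

/*
 * Minimal sketch: read the nr_bad_pages counter from /proc/vmstat.
 * Assumes a kernel with this patch applied; on other kernels the
 * field is simply absent and nothing is printed.
 */
int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char line[128];
	unsigned long long val;

	if (!f) {
		perror("fopen /proc/vmstat");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "nr_bad_pages %llu", &val) == 1)
			printf("nr_bad_pages: %llu\n", val);
	}

	fclose(f);
	return 0;
}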