@@ -81,7 +81,7 @@ Basic Algorithm
---------------
The memory allocations via :c:func:`kmalloc`, :c:func:`vmalloc`,
-:c:func:`kmem_cache_alloc` and
+:c:func:`kmem_cache_alloc`, :c:func:`alloc_pages` with ``__GFP_TRACKLEAK`` (note 1) and
friends are traced and the pointers, together with additional
information like size and stack trace, are stored in a rbtree.
The corresponding freeing function calls are tracked and the pointers
@@ -257,3 +257,6 @@ memory leaks``. Then read the file to see then::
Removing the module with ``rmmod kmemleak_test`` should also trigger some
kmemleak results.
+
+Note 1: Do not pass ``__GFP_TRACKLEAK`` when allocating pages whose physical
+addresses will be mapped to userspace via :c:func:`vm_iomap_memory`.
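
For illustration, here is a hypothetical driver snippet (not part of this patch; the
function name and error handling are invented). A caller that wants kmemleak to track a
page-allocator allocation only needs to OR the new flag into its GFP mask; the tracking
is removed automatically when the pages are freed:

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical example: kmemleak tracks this order-1 allocation because
 * __GFP_TRACKLEAK is set.  Do not use the flag for pages whose physical
 * addresses will later be mapped to userspace via vm_iomap_memory()
 * (see note 1 above).
 */
static int example_alloc_tracked_pages(void)
{
	struct page *pages;

	pages = alloc_pages(GFP_KERNEL | __GFP_TRACKLEAK, 1);
	if (!pages)
		return -ENOMEM;

	/*
	 * If every reference to 'pages' were lost here, a later kmemleak
	 * scan would report the allocation along with the recorded size
	 * (PAGE_SIZE << 1) and stack trace.
	 */

	__free_pages(pages, 1);	/* the free path calls kmemleak_free() */
	return 0;
}

As with any kmemleak-tracked object, the allocation is only reported if no scanned
memory still holds a reference to it.
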
@@ -68,6 +68,11 @@
#else
#define ___GFP_NOLOCKDEP 0
#endif
+#ifdef CONFIG_HAVE_DEBUG_KMEMLEAK
+#define ___GFP_TRACKLEAK 0x10000000u
+#else
+#define ___GFP_TRACKLEAK 0
+#endif
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -259,12 +264,13 @@
#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
+#define __GFP_TRACKLEAK ((__force gfp_t)___GFP_TRACKLEAK)
/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (IS_ENABLED(CONFIG_HAVE_DEBUG_KMEMLEAK) ? 29 : (27 + IS_ENABLED(CONFIG_LOCKDEP)))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/**
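
Note that ___GFP_TRACKLEAK is a fixed bit (0x10000000u, i.e. bit 28), so __GFP_BITS_SHIFT
must reach 29 whenever CONFIG_HAVE_DEBUG_KMEMLEAK is enabled, even with CONFIG_LOCKDEP
disabled; otherwise the flag would fall outside __GFP_BITS_MASK and be silently dropped
wherever GFP flags are masked. A compile-time check along the following lines (purely
illustrative, not part of the patch; the helper name is invented) would catch such a
mismatch:

#include <linux/build_bug.h>
#include <linux/gfp.h>

/*
 * Illustrative only: fail the build if the new flag lies outside the
 * advertised GFP bit range.  The __force casts merely silence sparse's
 * __bitwise (gfp_t) checking.
 */
static inline void gfp_trackleak_build_check(void)
{
	BUILD_BUG_ON((__force unsigned int)__GFP_TRACKLEAK &
		     ~(__force unsigned int)__GFP_BITS_MASK);
}
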
@@ -942,6 +942,7 @@ static inline bool is_page_hwpoison(struct page *page)
#define PG_offline 0x00000100
#define PG_table 0x00000200
#define PG_guard 0x00000400
+#define PG_trackleak 0x00000800
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -1012,6 +1013,8 @@ static inline bool page_has_type(struct page *page)
*/
PAGE_TYPE_OPS(Guard, guard)
+PAGE_TYPE_OPS(Trackleak, trackleak)
+
extern bool is_free_buddy_page(struct page *page);
PAGEFLAG(Isolated, isolated, PF_ANY);
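
For context, the PAGE_TYPE_OPS(Trackleak, trackleak) line above is what generates the
PageTrackleak(), __SetPageTrackleak() and __ClearPageTrackleak() helpers used in the
page allocator hunks below. A simplified sketch of those helpers, as they would appear
inside page-flags.h (the real macro also adds VM_BUG_ON_PAGE() sanity checks):

/*
 * Simplified sketch of the helpers generated by
 * PAGE_TYPE_OPS(Trackleak, trackleak).  Page types use an inverted
 * encoding: PageType() is true while the PG_trackleak bit is clear, so
 * "setting" the type clears the bit and "clearing" it sets the bit again.
 */
static __always_inline int PageTrackleak(struct page *page)
{
	return PageType(page, PG_trackleak);
}

static __always_inline void __SetPageTrackleak(struct page *page)
{
	page->page_type &= ~PG_trackleak;
}

static __always_inline void __ClearPageTrackleak(struct page *page)
{
	page->page_type |= PG_trackleak;
}
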
@@ -1357,6 +1357,10 @@ static __always_inline bool free_pages_prepare(struct page *page,
(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
}
}
+ if (PageTrackleak(page)) {
+ __ClearPageTrackleak(page);
+ kmemleak_free(page_address(page));
+ }
if (PageMappingFlags(page))
page->mapping = NULL;
if (memcg_kmem_enabled() && PageMemcgKmem(page))
@@ -1521,6 +1525,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
if (unlikely(isolated_pageblocks))
mt = get_pageblock_migratetype(page);
+ if (PageTrackleak(page)) {
+ __ClearPageTrackleak(page);
+ kmemleak_free(page_address(page));
+ }
+
__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
trace_mm_page_pcpu_drain(page, order, mt);
} while (count > 0 && !list_empty(list));
@@ -2468,6 +2477,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
set_page_pfmemalloc(page);
else
clear_page_pfmemalloc(page);
+
+ if (gfp_flags & __GFP_TRACKLEAK) {
+ kmemleak_alloc(page_address(page), PAGE_SIZE << order, 1, gfp_flags & ~__GFP_TRACKLEAK);
+ __SetPageTrackleak(page);
+ }
}
/*