Message ID | 20230301003545.282859-3-pcc@google.com (mailing list archive)
---|---
State | New, archived
Series | kasan: bugfix and cleanup
On Wed, Mar 1, 2023 at 1:35 AM Peter Collingbourne <pcc@google.com> wrote:
>
> Code inspection reveals that PG_skip_kasan_poison is redundant with
> kasantag, because the former is intended to be set iff the latter is
> the match-all tag. It can also be observed that it's basically pointless
> to poison pages which have kasantag=0, because any pages with this tag
> would have been pointed to by pointers with match-all tags, so poisoning
> the pages would have little to no effect in terms of bug detection.
> Therefore, change the condition in should_skip_kasan_poison() to check
> kasantag instead, and remove PG_skip_kasan_poison and associated flags.
>
> Signed-off-by: Peter Collingbourne <pcc@google.com>
> Link: https://linux-review.googlesource.com/id/I57f825f2eaeaf7e8389d6cf4597c8a5821359838

Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>

Thank you, Peter!
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 5088637fe5c2..6583a58670c5 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -47,16 +47,14 @@ typedef unsigned int __bitwise gfp_t;
 #define ___GFP_ACCOUNT 0x400000u
 #define ___GFP_ZEROTAGS 0x800000u
 #ifdef CONFIG_KASAN_HW_TAGS
-#define ___GFP_SKIP_ZERO 0x1000000u
-#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u
-#define ___GFP_SKIP_KASAN_POISON 0x4000000u
+#define ___GFP_SKIP_ZERO 0x1000000u
+#define ___GFP_SKIP_KASAN 0x2000000u
 #else
-#define ___GFP_SKIP_ZERO 0
-#define ___GFP_SKIP_KASAN_UNPOISON 0
-#define ___GFP_SKIP_KASAN_POISON 0
+#define ___GFP_SKIP_ZERO 0
+#define ___GFP_SKIP_KASAN 0
 #endif
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x8000000u
+#define ___GFP_NOLOCKDEP 0x4000000u
 #else
 #define ___GFP_NOLOCKDEP 0
 #endif
@@ -234,25 +232,24 @@ typedef unsigned int __bitwise gfp_t;
 * memory tags at the same time as zeroing memory has minimal additional
 * performace impact.
 *
- * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation.
- * Only effective in HW_TAGS mode.
- *
- * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation.
- * Typically, used for userspace pages. Only effective in HW_TAGS mode.
+ * %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
+ * Used for userspace and vmalloc pages; the latter are unpoisoned by
+ * kasan_unpoison_vmalloc instead. For userspace pages, results in
+ * poisoning being skipped as well, see should_skip_kasan_poison for
+ * details. Only effective in HW_TAGS mode.
 */
 #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
 #define __GFP_COMP ((__force gfp_t)___GFP_COMP)
 #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
 #define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
 #define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
-#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
-#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
+#define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)

 /* Disable lockdep for GFP context tracking */
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

 /**
@@ -335,8 +332,7 @@ typedef unsigned int __bitwise gfp_t;
 #define GFP_DMA __GFP_DMA
 #define GFP_DMA32 __GFP_DMA32
 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \
- __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | __GFP_SKIP_KASAN)
 #define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
 #define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a7e3a3405520..74f81a52e7e1 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -135,9 +135,6 @@ enum pageflags {
 #ifdef CONFIG_ARCH_USES_PG_ARCH_X
        PG_arch_2,
        PG_arch_3,
-#endif
-#ifdef CONFIG_KASAN_HW_TAGS
-       PG_skip_kasan_poison,
 #endif
        __NR_PAGEFLAGS,

@@ -594,12 +591,6 @@ TESTCLEARFLAG(Young, young, PF_ANY)
 PAGEFLAG(Idle, idle, PF_ANY)
 #endif

-#ifdef CONFIG_KASAN_HW_TAGS
-PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
-#else
-PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
-#endif
-
 /*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 9db52bc4ce19..232bc8efc98e 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -55,8 +55,7 @@
 #ifdef CONFIG_KASAN_HW_TAGS
 #define __def_gfpflag_names_kasan , \
        gfpflag_string(__GFP_SKIP_ZERO), \
-       gfpflag_string(__GFP_SKIP_KASAN_POISON), \
-       gfpflag_string(__GFP_SKIP_KASAN_UNPOISON)
+       gfpflag_string(__GFP_SKIP_KASAN)
 #else
 #define __def_gfpflag_names_kasan
 #endif
@@ -96,12 +95,6 @@
 #define IF_HAVE_PG_ARCH_X(flag,string)
 #endif

-#ifdef CONFIG_KASAN_HW_TAGS
-#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string) ,{1UL << flag, string}
-#else
-#define IF_HAVE_PG_SKIP_KASAN_POISON(flag,string)
-#endif
-
 #define __def_pageflag_names \
        {1UL << PG_locked, "locked" }, \
        {1UL << PG_waiters, "waiters" }, \
@@ -130,8 +123,7 @@ IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \
 IF_HAVE_PG_IDLE(PG_young, "young" ) \
 IF_HAVE_PG_IDLE(PG_idle, "idle" ) \
 IF_HAVE_PG_ARCH_X(PG_arch_2, "arch_2" ) \
-IF_HAVE_PG_ARCH_X(PG_arch_3, "arch_3" ) \
-IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison")
+IF_HAVE_PG_ARCH_X(PG_arch_3, "arch_3" )

 #define show_page_flags(flags) \
        (flags) ? __print_flags(flags, "|", \
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index d1bcb0205327..bb4f56e5bdec 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -318,7 +318,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
         * Thus, for VM_ALLOC mappings, hardware tag-based KASAN only tags
         * the first virtual mapping, which is created by vmalloc().
         * Tagging the page_alloc memory backing that vmalloc() allocation is
-        * skipped, see ___GFP_SKIP_KASAN_UNPOISON.
+        * skipped, see ___GFP_SKIP_KASAN.
         *
         * For non-VM_ALLOC allocations, page_alloc memory is tagged as usual.
         */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7136c36c5d01..0db33faf760d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -112,17 +112,6 @@ typedef int __bitwise fpi_t;
 */
 #define FPI_TO_TAIL ((__force fpi_t)BIT(1))

-/*
- * Don't poison memory with KASAN (only for the tag-based modes).
- * During boot, all non-reserved memblock memory is exposed to page_alloc.
- * Poisoning all that memory lengthens boot time, especially on systems with
- * large amount of RAM. This flag is used to skip that poisoning.
- * This is only done for the tag-based KASAN modes, as those are able to
- * detect memory corruptions with the memory tags assigned by default.
- * All memory allocated normally after boot gets poisoned as usual.
- */
-#define FPI_SKIP_KASAN_POISON ((__force fpi_t)BIT(2))
-
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
@@ -1355,13 +1344,19 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 /*
 * Skip KASAN memory poisoning when either:
 *
- * 1. Deferred memory initialization has not yet completed,
- *    see the explanation below.
- * 2. Skipping poisoning is requested via FPI_SKIP_KASAN_POISON,
- *    see the comment next to it.
- * 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON,
- *    see the comment next to it.
- * 4. The allocation is excluded from being checked due to sampling,
+ * 1. For generic KASAN: deferred memory initialization has not yet completed.
+ *    Tag-based KASAN modes skip pages freed via deferred memory initialization
+ *    using page tags instead (see below).
+ * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
+ *    that error detection is disabled for accesses via the page address.
+ *
+ * Pages will have match-all tags in the following circumstances:
+ *
+ * 1. Pages are being initialized for the first time, including during deferred
+ *    memory init; see the call to page_kasan_tag_reset in __init_single_page.
+ * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
+ *    exception of pages unpoisoned by kasan_unpoison_vmalloc.
+ * 3. The allocation was excluded from being checked due to sampling,
 *    see the call to kasan_unpoison_pages.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
@@ -1377,10 +1372,10 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 */
 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
 {
-       return deferred_pages_enabled() ||
-              (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-               (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
-              PageSkipKASanPoison(page);
+       if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+               return deferred_pages_enabled();
+
+       return page_kasan_tag(page) == 0xff;
 }

 static void kernel_init_pages(struct page *page, int numpages)
@@ -1754,7 +1749,7 @@ void __free_pages_core(struct page *page, unsigned int order)
        * Bypass PCP and place fresh pages right to the tail, primarily
        * relevant for memory onlining.
        */
-       __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
+       __free_pages_ok(page, order, FPI_TO_TAIL);
 }

 #ifdef CONFIG_NUMA
@@ -2456,9 +2451,9 @@ static inline bool should_skip_kasan_unpoison(gfp_t flags)

        /*
        * With hardware tag-based KASAN enabled, skip if this has been
-       * requested via __GFP_SKIP_KASAN_UNPOISON.
+       * requested via __GFP_SKIP_KASAN.
        */
-       return flags & __GFP_SKIP_KASAN_UNPOISON;
+       return flags & __GFP_SKIP_KASAN;
 }

 static inline bool should_skip_init(gfp_t flags)
@@ -2477,7 +2472,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
        bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
                        !should_skip_init(gfp_flags);
        bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
-       bool reset_tags = true;
        int i;

        set_page_private(page, 0);
@@ -2511,37 +2505,22 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
                /* Take note that memory was initialized by the loop above. */
                init = false;
        }
-       if (!should_skip_kasan_unpoison(gfp_flags)) {
-               /* Try unpoisoning (or setting tags) and initializing memory. */
-               if (kasan_unpoison_pages(page, order, init)) {
-                       /* Take note that memory was initialized by KASAN. */
-                       if (kasan_has_integrated_init())
-                               init = false;
-                       /* Take note that memory tags were set by KASAN. */
-                       reset_tags = false;
-               } else {
-                       /*
-                        * KASAN decided to exclude this allocation from being
-                        * (un)poisoned due to sampling. Make KASAN skip
-                        * poisoning when the allocation is freed.
-                        */
-                       SetPageSkipKASanPoison(page);
-               }
-       }
-       /*
-        * If memory tags have not been set by KASAN, reset the page tags to
-        * ensure page_address() dereferencing does not fault.
-        */
-       if (reset_tags) {
+       if (!should_skip_kasan_unpoison(gfp_flags) &&
+           kasan_unpoison_pages(page, order, init)) {
+               /* Take note that memory was initialized by KASAN. */
+               if (kasan_has_integrated_init())
+                       init = false;
+       } else {
+               /*
+                * If memory tags have not been set by KASAN, reset the page
+                * tags to ensure page_address() dereferencing does not fault.
+                */
                for (i = 0; i != 1 << order; ++i)
                        page_kasan_tag_reset(page + i);
        }
        /* If memory is still not initialized, initialize it now. */
        if (init)
                kernel_init_pages(page, 1 << order);
-       /* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
-       if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
-               SetPageSkipKASanPoison(page);

        set_page_owner(page, order, gfp_flags);
        page_table_check_alloc(page, order);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ef910bf349e1..b0c84847e9b6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3170,7 +3170,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                * pages backing VM_ALLOC mapping. Memory is instead
                * poisoned and zeroed by kasan_unpoison_vmalloc().
                */
-               gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
+               gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
        }

        /* Take note that the mapping is PAGE_KERNEL. */
Code inspection reveals that PG_skip_kasan_poison is redundant with
kasantag, because the former is intended to be set iff the latter is
the match-all tag. It can also be observed that it's basically pointless
to poison pages which have kasantag=0, because any pages with this tag
would have been pointed to by pointers with match-all tags, so poisoning
the pages would have little to no effect in terms of bug detection.
Therefore, change the condition in should_skip_kasan_poison() to check
kasantag instead, and remove PG_skip_kasan_poison and associated flags.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/I57f825f2eaeaf7e8389d6cf4597c8a5821359838
---
v3:
- update comments

v2:
- also remove GFP_SKIP_KASAN_POISON and FPI_SKIP_KASAN_POISON
- rename GFP_SKIP_KASAN_UNPOISON to GFP_SKIP_KASAN
- update comments
- simplify control flow by removing reset_tags

 include/linux/gfp_types.h      | 30 ++++++-------
 include/linux/page-flags.h     |  9 ----
 include/trace/events/mmflags.h | 12 +----
 mm/kasan/hw_tags.c             |  2 +-
 mm/page_alloc.c                | 81 +++++++++++++---------------------
 mm/vmalloc.c                   |  2 +-
 6 files changed, 47 insertions(+), 89 deletions(-)
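For readers outside the thread, the gist of the change is that the free path now keys off the page's own KASAN tag instead of a dedicated page flag. Below is a minimal, self-contained userspace sketch of that policy; the helper name, its parameters, and the main() driver are invented for illustration, while the 0xff match-all value and the generic-KASAN/deferred-init behaviour are taken from the patch itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Match-all tag used by HW_TAGS KASAN (KASAN_TAG_KERNEL is 0xff). */
#define MATCH_ALL_TAG 0xffu

/*
 * Stand-in for the new should_skip_kasan_poison() policy: generic KASAN
 * keeps skipping poisoning only while deferred memory init is pending;
 * the tag-based modes skip it whenever the page carries the match-all
 * tag, which is the state PG_skip_kasan_poison used to track separately.
 */
static bool skip_poison(uint8_t page_tag, bool generic_kasan,
                        bool deferred_init_pending)
{
        if (generic_kasan)
                return deferred_init_pending;

        return page_tag == MATCH_ALL_TAG;
}

int main(void)
{
        /* A userspace page allocated with __GFP_SKIP_KASAN keeps 0xff. */
        printf("userspace page: %d\n", skip_poison(0xff, false, false));
        /* A normally tagged kernel page still gets poisoned on free. */
        printf("tagged page:    %d\n", skip_poison(0x3a, false, false));
        return 0;
}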