Message ID | 20201108065758.1815-2-rppt@kernel.org (mailing list archive)
---|---
State | Superseded, archived
Series | arch, mm: improve robustness of direct map manipulation
On 11/8/20 7:57 AM, Mike Rapoport wrote:
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -1428,21 +1428,19 @@ static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
>  	return false;
>  }
>  
> -#ifdef CONFIG_DEBUG_PAGEALLOC
>  static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
>  {
>  	if (!is_debug_pagealloc_cache(cachep))
>  		return;

Hmm, I didn't notice earlier, sorry. The is_debug_pagealloc_cache() above
includes a debug_pagealloc_enabled_static() check, so it should be fine to
use __kernel_map_pages() directly below. Otherwise we generate two static
key checks for the same key needlessly.

>  
> -	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
> +	if (map)
> +		debug_pagealloc_map_pages(virt_to_page(objp),
> +					  cachep->size / PAGE_SIZE);
> +	else
> +		debug_pagealloc_unmap_pages(virt_to_page(objp),
> +					    cachep->size / PAGE_SIZE);
>  }
>  
> -#else
> -static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
> -				   int map) {}
> -
> -#endif
> -
>  static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
>  {
>  	int size = cachep->object_size;
> @@ -2062,7 +2060,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
>  
>  #if DEBUG
>  	/*
> -	 * If we're going to use the generic kernel_map_pages()
> +	 * If we're going to use the generic debug_pagealloc_map_pages()
>  	 * poisoning, then it's going to smash the contents of
>  	 * the redzone and userword anyhow, so switch them off.
>  	 */
> 
On Mon, Nov 09, 2020 at 12:33:46PM +0100, Vlastimil Babka wrote:
> On 11/8/20 7:57 AM, Mike Rapoport wrote:
> > --- a/mm/slab.c
> > +++ b/mm/slab.c
> > @@ -1428,21 +1428,19 @@ static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
> >  	return false;
> >  }
> > -#ifdef CONFIG_DEBUG_PAGEALLOC
> >  static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
> >  {
> >  	if (!is_debug_pagealloc_cache(cachep))
> >  		return;
> 
> Hmm, I didn't notice earlier, sorry.
> The is_debug_pagealloc_cache() above includes a
> debug_pagealloc_enabled_static() check, so it should be fine to use
> __kernel_map_pages() directly below. Otherwise we generate two static key
> checks for the same key needlessly.

Ok, I'll revert slab changes.

> > -	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
> > +	if (map)
> > +		debug_pagealloc_map_pages(virt_to_page(objp),
> > +					  cachep->size / PAGE_SIZE);
> > +	else
> > +		debug_pagealloc_unmap_pages(virt_to_page(objp),
> > +					    cachep->size / PAGE_SIZE);
> >  }
> > -#else
> > -static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
> > -				   int map) {}
> > -
> > -#endif
> > -
> >  static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
> >  {
> >  	int size = cachep->object_size;
> > @@ -2062,7 +2060,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
> >  #if DEBUG
> >  	/*
> > -	 * If we're going to use the generic kernel_map_pages()
> > +	 * If we're going to use the generic debug_pagealloc_map_pages()
> >  	 * poisoning, then it's going to smash the contents of
> >  	 * the redzone and userword anyhow, so switch them off.
> >  	 */
> > 
> 
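For context, a minimal sketch of the simplification Vlastimil suggests (illustrative only, not the posted patch): since is_debug_pagealloc_cache() already bails out unless debug_pagealloc_enabled_static() is true, slab_kernel_map() can call __kernel_map_pages() directly instead of going through the new helpers, so the same static key is not tested twice.

	static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
	{
		/* Already gated on debug_pagealloc_enabled_static(). */
		if (!is_debug_pagealloc_cache(cachep))
			return;

		/* No second static-key check; call the arch hook directly. */
		__kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
	}

The patch under discussion follows.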
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ef360fe70aaf..bb8c70178f4e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2936,12 +2936,27 @@ kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	__kernel_map_pages(page, numpages, enable);
 }
+
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
+{
+	if (debug_pagealloc_enabled_static())
+		__kernel_map_pages(page, numpages, 1);
+}
+
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
+{
+	if (debug_pagealloc_enabled_static())
+		__kernel_map_pages(page, numpages, 0);
+}
+
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif	/* CONFIG_HIBERNATION */
 #else	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable) {}
+static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
+static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
 #ifdef CONFIG_HIBERNATION
 static inline bool kernel_page_present(struct page *page) { return true; }
 #endif	/* CONFIG_HIBERNATION */
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b44d4c7ba73b..f18f86ba2a68 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -614,8 +614,7 @@ void generic_online_page(struct page *page, unsigned int order)
 	 * so we should map it first. This is better than introducing a special
 	 * case in page freeing fast path.
 	 */
-	if (debug_pagealloc_enabled_static())
-		kernel_map_pages(page, 1 << order, 1);
+	debug_pagealloc_map_pages(page, 1 << order);
 	__free_pages_core(page, order);
 	totalram_pages_add(1UL << order);
 #ifdef CONFIG_HIGHMEM
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 23f5066bd4a5..db1bf70458d0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1272,8 +1272,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	 */
 	arch_free_page(page, order);
 
-	if (debug_pagealloc_enabled_static())
-		kernel_map_pages(page, 1 << order, 0);
+	debug_pagealloc_unmap_pages(page, 1 << order);
 
 	kasan_free_nondeferred_pages(page, order);
 
@@ -2270,8 +2269,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
-	if (debug_pagealloc_enabled_static())
-		kernel_map_pages(page, 1 << order, 1);
+	debug_pagealloc_map_pages(page, 1 << order);
 	kasan_alloc_pages(page, order);
 	kernel_poison_pages(page, 1 << order, 1);
 	set_page_owner(page, order, gfp_flags);
diff --git a/mm/slab.c b/mm/slab.c
index b1113561b98b..07317386e150 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1428,21 +1428,19 @@ static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 	return false;
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
 static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
 {
 	if (!is_debug_pagealloc_cache(cachep))
 		return;
 
-	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
+	if (map)
+		debug_pagealloc_map_pages(virt_to_page(objp),
+					  cachep->size / PAGE_SIZE);
+	else
+		debug_pagealloc_unmap_pages(virt_to_page(objp),
+					    cachep->size / PAGE_SIZE);
 }
 
-#else
-static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
-				   int map) {}
-
-#endif
-
 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
 {
 	int size = cachep->object_size;
@@ -2062,7 +2060,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
 
 #if DEBUG
 	/*
-	 * If we're going to use the generic kernel_map_pages()
+	 * If we're going to use the generic debug_pagealloc_map_pages()
 	 * poisoning, then it's going to smash the contents of
 	 * the redzone and userword anyhow, so switch them off.
 	 */
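In short, the conversion the patch applies at each generic call site is the pattern below (condensed from the hunks above; page and order stand in for the local variables at each site):

	/* Before: each caller open-codes the static-key check. */
	if (debug_pagealloc_enabled_static())
		kernel_map_pages(page, 1 << order, 1);

	/*
	 * After: the check lives inside the helper added to include/linux/mm.h,
	 * which compiles to an empty stub when neither CONFIG_DEBUG_PAGEALLOC
	 * nor CONFIG_ARCH_HAS_SET_DIRECT_MAP is set.
	 */
	debug_pagealloc_map_pages(page, 1 << order);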