@@ -1428,17 +1428,21 @@ static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 	return false;
 }
 
-static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp)
 {
 	if (!is_debug_pagealloc_cache(cachep))
 		return;
 
-	if (map)
-		debug_pagealloc_map_pages(virt_to_page(objp),
-					  cachep->size / PAGE_SIZE);
-	else
-		debug_pagealloc_unmap_pages(virt_to_page(objp),
-					    cachep->size / PAGE_SIZE);
+	debug_pagealloc_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE);
+}
+
+static void slab_kernel_unmap(struct kmem_cache *cachep, void *objp)
+{
+	if (!is_debug_pagealloc_cache(cachep))
+		return;
+
+	debug_pagealloc_unmap_pages(virt_to_page(objp),
+				    cachep->size / PAGE_SIZE);
 }
 
 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
@@ -1585,7 +1589,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 
 		if (cachep->flags & SLAB_POISON) {
 			check_poison_obj(cachep, objp);
-			slab_kernel_map(cachep, objp, 1);
+			slab_kernel_map(cachep, objp);
 		}
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
@@ -2360,7 +2364,7 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON) {
 			poison_obj(cachep, objp, POISON_FREE);
-			slab_kernel_map(cachep, objp, 0);
+			slab_kernel_unmap(cachep, objp);
 		}
 	}
 #endif
@@ -2728,7 +2732,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 
 	if (cachep->flags & SLAB_POISON) {
 		poison_obj(cachep, objp, POISON_FREE);
-		slab_kernel_map(cachep, objp, 0);
+		slab_kernel_unmap(cachep, objp);
 	}
 	return objp;
 }
@@ -2993,7 +2997,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
 		check_poison_obj(cachep, objp);
-		slab_kernel_map(cachep, objp, 1);
+		slab_kernel_map(cachep, objp);
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
 	if (cachep->flags & SLAB_STORE_USER)