@@ -1964,49 +1964,48 @@ static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
}
-static void __free_slab(struct kmem_cache *s, struct page *page)
+static void __free_slab(struct kmem_cache *s, struct slab *slab)
{
- int order = compound_order(page);
+ struct page *page = slab_page(slab);
+ int order = slab_order(slab);
int pages = 1 << order;
if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
void *p;
- slab_pad_check(s, page);
- for_each_object(p, s, page_address(page),
- page->objects)
- check_object(s, page, p, SLUB_RED_INACTIVE);
+ slab_pad_check(s, slab_page(slab));
+ for_each_object(p, s, slab_address(slab), slab->objects)
+ check_object(s, slab_page(slab), p, SLUB_RED_INACTIVE);
}
- __ClearPageSlabPfmemalloc(page);
+ __slab_clear_pfmemalloc(slab);
__ClearPageSlab(page);
- /* In union with page->mapping where page allocator expects NULL */
- page->slab_cache = NULL;
+ page->mapping = NULL;
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
- unaccount_slab_page(page, order, s);
+ unaccount_slab(slab, order, s);
__free_pages(page, order);
}
static void rcu_free_slab(struct rcu_head *h)
{
- struct page *page = container_of(h, struct page, rcu_head);
+ struct slab *slab = container_of(h, struct slab, rcu_head);
- __free_slab(page->slab_cache, page);
+ __free_slab(slab->slab_cache, slab);
}
-static void free_slab(struct kmem_cache *s, struct page *page)
+static void free_slab(struct kmem_cache *s, struct slab *slab)
{
if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
- call_rcu(&page->rcu_head, rcu_free_slab);
+ call_rcu(&slab->rcu_head, rcu_free_slab);
} else
- __free_slab(s, page);
+ __free_slab(s, slab);
}
-static void discard_slab(struct kmem_cache *s, struct page *page)
+static void discard_slab(struct kmem_cache *s, struct slab *slab)
{
- dec_slabs_node(s, page_to_nid(page), page->objects);
- free_slab(s, page);
+ dec_slabs_node(s, slab_nid(slab), slab->objects);
+ free_slab(s, slab);
}
/*
@@ -2431,7 +2430,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
stat(s, DEACTIVATE_FULL);
else if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
- discard_slab(s, slab_page(slab));
+ discard_slab(s, slab);
stat(s, FREE_SLAB);
}
}
@@ -2492,7 +2491,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
unusable = unusable->next;
stat(s, DEACTIVATE_EMPTY);
- discard_slab(s, slab_page(slab));
+ discard_slab(s, slab);
stat(s, FREE_SLAB);
}
}
@@ -3387,7 +3386,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
- discard_slab(s, slab_page(slab));
+ discard_slab(s, slab);
}
/*
@@ -4257,7 +4256,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
spin_unlock_irq(&n->list_lock);
list_for_each_entry_safe(slab, h, &discard, slab_list)
- discard_slab(s, slab_page(slab));
+ discard_slab(s, slab);
}
bool __kmem_cache_empty(struct kmem_cache *s)
@@ -4606,7 +4605,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
/* Release empty slabs */
list_for_each_entry_safe(slab, t, &discard, slab_list)
- discard_slab(s, slab_page(slab));
+ discard_slab(s, slab);
if (slabs_node(s, node))
ret = 1;
Improve type safety by passing a slab pointer through discard_slab() to
free_slab() and __free_slab().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 45 ++++++++++++++++++++++-----------------------
 1 file changed, 22 insertions(+), 23 deletions(-)
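Note (not part of the commit message): for readers following the series, the stand-alone C sketch below illustrates the type-safety argument the changelog makes. The struct layout, helper bodies and the simplified single-argument discard_slab() signature are assumptions invented for the demo, not the real mm/slab.h or mm/slub.c definitions; the only point is that once the free path takes struct slab *, a call site that still hands it a struct page * no longer compiles cleanly.

/* Stand-alone sketch (plain userspace C); simplified stand-ins, not mm/slab.h. */
#include <stdio.h>

struct page { unsigned int order; int nid; void *virt; };

/*
 * A distinct slab type backed by a page.  In the kernel the slab metadata
 * still lives in the page's memory; here the nesting simply gives the
 * compiler two unrelated pointer types, which is what the conversion uses.
 */
struct slab {
	struct page backing;
	unsigned int objects;
};

/* Hypothetical accessors mirroring the shape of slab_order()/slab_nid()/
 * slab_address(): each one funnels through to the backing page. */
static inline unsigned int slab_order(const struct slab *slab)
{
	return slab->backing.order;
}

static inline int slab_nid(const struct slab *slab)
{
	return slab->backing.nid;
}

static inline void *slab_address(const struct slab *slab)
{
	return slab->backing.virt;
}

/* After the conversion, the discard/free path only accepts struct slab *. */
static void discard_slab(struct slab *slab)
{
	printf("freeing order-%u slab on node %d at %p\n",
	       slab_order(slab), slab_nid(slab), slab_address(slab));
}

int main(void)
{
	static char backing_mem[4096];
	struct slab s = {
		.backing = { .order = 0, .nid = 0, .virt = backing_mem },
		.objects = 32,
	};

	discard_slab(&s);		/* OK: slab pointer expected and given */
	/* discard_slab(&s.backing);	   rejected: incompatible pointer type */
	return 0;
}

Built with e.g. "gcc -Wall sketch.c", the commented-out call produces an incompatible-pointer-type diagnostic, which is the class of mistake the slab-typed discard_slab()/free_slab()/__free_slab() signatures now catch at build time.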