Message ID | 20241112-slub-percpu-caches-v1-6-ddc0bdc27e05@suse.cz
---|---
State | New
Series | SLUB percpu sheaves
On Wed, Nov 13, 2024 at 1:39 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> Add three functions for efficient guaranteed allocations in a critical
> section (that cannot sleep) when the exact number of allocations is not
> known beforehand, but an upper limit can be calculated.
>
> kmem_cache_prefill_sheaf() returns a sheaf containing at least given
> number of objects.
>
> kmem_cache_alloc_from_sheaf() will allocate an object from the sheaf
> and is guaranteed not to fail until depleted.
>
> kmem_cache_return_sheaf() is for giving the sheaf back to the slab
> allocator after the critical section. This will also attempt to refill
> it to cache's sheaf capacity for better efficiency of sheaves handling,
> but it's not stricly necessary to succeed.
>
> TODO: the current implementation is limited to cache's sheaf_capacity
>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  include/linux/slab.h |  11 ++++
>  mm/slub.c            | 149 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 160 insertions(+)
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index 23904321992ad2eeb9389d0883cf4d5d5d71d896..a87dc3c6392fe235de2eabe1792df86d40c3bbf9 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -820,6 +820,17 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
>                                             int node) __assume_slab_alignment __malloc;
>  #define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
>
> +struct slab_sheaf *
> +kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count);
> +
> +void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
> +                             struct slab_sheaf *sheaf);
> +
> +void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
> +                        struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
> +#define kmem_cache_alloc_from_sheaf(...) \
> +                alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
> +
>  /*
>   * These macros allow declaring a kmem_buckets * parameter alongside size, which
>   * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
> diff --git a/mm/slub.c b/mm/slub.c
> index 1900afa6153ca6d88f9df7db3ce84d98629489e7..a0e2cb7dfb5173f39f36bea1eb9760c3c1b99dd7 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -444,6 +444,7 @@ struct slab_sheaf {
>          union {
>                  struct rcu_head rcu_head;
>                  struct list_head barn_list;
> +                bool oversize;
>          };
>          struct kmem_cache *cache;
>          unsigned int size;
> @@ -2819,6 +2820,30 @@ static int barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf,
>          return ret;
>  }
>
> +static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn)
> +{
> +        struct slab_sheaf *sheaf = NULL;
> +        unsigned long flags;
> +
> +        spin_lock_irqsave(&barn->lock, flags);
> +
> +        if (barn->nr_empty) {
> +                sheaf = list_first_entry(&barn->sheaves_empty,
> +                                         struct slab_sheaf, barn_list);
> +                list_del(&sheaf->barn_list);
> +                barn->nr_empty--;
> +        } else if (barn->nr_full) {
> +                sheaf = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
> +                                         barn_list);
> +                list_del(&sheaf->barn_list);
> +                barn->nr_full--;
> +        }
> +
> +        spin_unlock_irqrestore(&barn->lock, flags);
> +
> +        return sheaf;
> +}
> +
>  /*
>   * If a full sheaf is available, return it and put the supplied empty one to
>   * barn. We ignore the limit on empty sheaves as the number of sheaves doesn't
> @@ -4893,6 +4918,130 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int nod
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
>
> +
> +/*
> + * returns a sheaf that has least the given count of objects
> + * when prefilling is needed, do so with given gfp flags
> + *
> + * return NULL if prefilling failed, or when the requested count is
> + * above cache's sheaf_capacity (TODO: lift this limitation)
> + */
> +struct slab_sheaf *
> +kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count)
> +{
> +        struct slub_percpu_sheaves *pcs;
> +        struct slab_sheaf *sheaf = NULL;
> +
> +        //TODO: handle via oversize sheaf
> +        if (count > s->sheaf_capacity)
> +                return NULL;
> +
> +        pcs = cpu_sheaves_lock(s->cpu_sheaves);
> +
> +        if (pcs->spare && pcs->spare->size > 0) {
> +                sheaf = pcs->spare;
> +                pcs->spare = NULL;
> +        }
> +
> +        if (!sheaf)
> +                sheaf = barn_get_full_or_empty_sheaf(pcs->barn);
> +
> +        cpu_sheaves_unlock(s->cpu_sheaves);
> +
> +        if (!sheaf)
> +                sheaf = alloc_empty_sheaf(s, gfp);
> +
> +        if (sheaf && sheaf->size < count) {
> +                if (refill_sheaf(s, sheaf, gfp)) {
> +                        sheaf_flush(s, sheaf);
> +                        free_empty_sheaf(s, sheaf);
> +                        sheaf = NULL;
> +                }
> +        }
> +
> +        return sheaf;
> +}
> +
> +/*
> + * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
> + * It tries to refill the sheaf back to the cache's sheaf_capacity
> + * to avoid handling partially full sheaves.
> + *
> + * If the refill fails because gfp is e.g. GFP_NOWAIT, the sheaf is
> + * instead dissolved
> + */
> +void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
> +                             struct slab_sheaf *sheaf)
> +{
> +        struct slub_percpu_sheaves *pcs;
> +        bool refill = false;
> +        struct node_barn *barn;
> +
> +        //TODO: handle oversize sheaf
> +
> +        pcs = cpu_sheaves_lock(s->cpu_sheaves);
> +
> +        if (!pcs->spare) {
> +                pcs->spare = sheaf;
> +                sheaf = NULL;
> +        }
> +
> +        /* racy check */
> +        if (!sheaf && pcs->barn->nr_full >= MAX_FULL_SHEAVES) {
> +                barn = pcs->barn;
> +                refill = true;
> +        }
> +
> +        cpu_sheaves_unlock(s->cpu_sheaves);
> +
> +        if (!sheaf)
> +                return;
> +
> +        /*
> +         * if the barn is full of full sheaves or we fail to refill the sheaf,
> +         * simply flush and free it
> +         */
> +        if (!refill || refill_sheaf(s, sheaf, gfp)) {
> +                sheaf_flush(s, sheaf);
> +                free_empty_sheaf(s, sheaf);
> +                return;
> +        }
> +
> +        /* we racily determined the sheaf would fit, so now force it */
> +        barn_put_full_sheaf(barn, sheaf, true);
> +}
> +
> +/*
> + * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
> + *
> + * Guaranteed not to fail as many allocations as was the requested count.
> + * After the sheaf is emptied, it fails - no fallback to the slab cache itself.
> + *
> + * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT
> + * memcg charging is forced over limit if necessary, to avoid failure.
> + */
> +void *
> +kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
> +                        struct slab_sheaf *sheaf)
> +{
> +        void *ret = NULL;
> +        bool init;
> +
> +        if (sheaf->size == 0)
> +                goto out;
> +
> +        ret = sheaf->objects[--sheaf->size];
> +
> +        init = slab_want_init_on_alloc(gfp, s);
> +
> +        /* add __GFP_NOFAIL to force successful memcg charging */
> +        slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size);

Maybe I'm missing something, but how can this be used for non-sleepable contexts
if __GFP_NOFAIL is used? I think we have to charge them when the sheaf
is returned via kmem_cache_prefill_sheaf(), just like users of bulk alloc/free?

Best,
Hyeonggon

> +out:
> +        trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE);
> +
> +        return ret;
> +}
> +
>  /*
>   * To avoid unnecessary overhead, we pass through large allocation requests
>   * directly to the page allocator. We use __GFP_COMP, because we will need to
>
> --
> 2.47.0
>
On 11/18/24 14:13, Hyeonggon Yoo wrote:
> On Wed, Nov 13, 2024 at 1:39 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>> +
>> +/*
>> + * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
>> + *
>> + * Guaranteed not to fail as many allocations as was the requested count.
>> + * After the sheaf is emptied, it fails - no fallback to the slab cache itself.
>> + *
>> + * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT
>> + * memcg charging is forced over limit if necessary, to avoid failure.
>> + */
>> +void *
>> +kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
>> +                        struct slab_sheaf *sheaf)
>> +{
>> +        void *ret = NULL;
>> +        bool init;
>> +
>> +        if (sheaf->size == 0)
>> +                goto out;
>> +
>> +        ret = sheaf->objects[--sheaf->size];
>> +
>> +        init = slab_want_init_on_alloc(gfp, s);
>> +
>> +        /* add __GFP_NOFAIL to force successful memcg charging */
>> +        slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size);
>
> Maybe I'm missing something, but how can this be used for non-sleepable contexts
> if __GFP_NOFAIL is used? I think we have to charge them when the sheaf

AFAIK it forces memcg to simply charge even if allocated memory goes over
the memcg limit. So there's no issue with a non-sleepable context, there
shouldn't be memcg reclaim happening in that case.

> is returned
> via kmem_cache_prefill_sheaf(), just like users of bulk alloc/free?

That would be very costly to charge/uncharge if most of the objects are not
actually used - it's what we want to avoid here.
Going over the memcgs limit a bit in a very rare case isn't considered such
an issue, for example Linus advocated such approach too in another context.

> Best,
> Hyeonggon
>
>> +out:
>> +        trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE);
>> +
>> +        return ret;
>> +}
>> +
>>  /*
>>   * To avoid unnecessary overhead, we pass through large allocation requests
>>   * directly to the page allocator. We use __GFP_COMP, because we will need to
>>
>> --
>> 2.47.0
>>
On Mon, Nov 18, 2024 at 11:26 PM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 11/18/24 14:13, Hyeonggon Yoo wrote:
> > On Wed, Nov 13, 2024 at 1:39 AM Vlastimil Babka <vbabka@suse.cz> wrote:
> >> +
> >> +/*
> >> + * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
> >> + *
> >> + * Guaranteed not to fail as many allocations as was the requested count.
> >> + * After the sheaf is emptied, it fails - no fallback to the slab cache itself.
> >> + *
> >> + * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT
> >> + * memcg charging is forced over limit if necessary, to avoid failure.
> >> + */
> >> +void *
> >> +kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
> >> +                        struct slab_sheaf *sheaf)
> >> +{
> >> +        void *ret = NULL;
> >> +        bool init;
> >> +
> >> +        if (sheaf->size == 0)
> >> +                goto out;
> >> +
> >> +        ret = sheaf->objects[--sheaf->size];
> >> +
> >> +        init = slab_want_init_on_alloc(gfp, s);
> >> +
> >> +        /* add __GFP_NOFAIL to force successful memcg charging */
> >> +        slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size);
> >
> > Maybe I'm missing something, but how can this be used for non-sleepable contexts
> > if __GFP_NOFAIL is used? I think we have to charge them when the sheaf
>
> AFAIK it forces memcg to simply charge even if allocated memory goes over
> the memcg limit. So there's no issue with a non-sleepable context, there
> shouldn't be memcg reclaim happening in that case.

Ok, but I am still worried about mem alloc profiling/memcg trying to
allocate some memory
with __GFP_NOFAIL flag and eventually passing it to the buddy allocator,
which does not want __GFP_NOFAIL without __GFP_DIRECT_RECLAIM?
e.g.) memcg hook calls
alloc_slab_obj_exts()->kcalloc_node()->....->alloc_pages()

> > is returned
> > via kmem_cache_prefill_sheaf(), just like users of bulk alloc/free?
>
> That would be very costly to charge/uncharge if most of the objects are not
> actually used - it's what we want to avoid here.
> Going over the memcgs limit a bit in a very rare case isn't considered such
> an issue, for example Linus advocated such approach too in another context.

Thanks for the explanation! That was a point I was missing.

> > Best,
> > Hyeonggon
> >
> >> +out:
> >> +        trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE);
> >> +
> >> +        return ret;
> >> +}
> >> +
> >>  /*
> >>   * To avoid unnecessary overhead, we pass through large allocation requests
> >>   * directly to the page allocator. We use __GFP_COMP, because we will need to
> >>
> >> --
> >> 2.47.0
> >>
>
On 11/19/24 03:29, Hyeonggon Yoo wrote:
> On Mon, Nov 18, 2024 at 11:26 PM Vlastimil Babka <vbabka@suse.cz> wrote:
>>
>> On 11/18/24 14:13, Hyeonggon Yoo wrote:
>> > On Wed, Nov 13, 2024 at 1:39 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>> >> +
>> >> +/*
>> >> + * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
>> >> + *
>> >> + * Guaranteed not to fail as many allocations as was the requested count.
>> >> + * After the sheaf is emptied, it fails - no fallback to the slab cache itself.
>> >> + *
>> >> + * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT
>> >> + * memcg charging is forced over limit if necessary, to avoid failure.
>> >> + */
>> >> +void *
>> >> +kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
>> >> +                        struct slab_sheaf *sheaf)
>> >> +{
>> >> +        void *ret = NULL;
>> >> +        bool init;
>> >> +
>> >> +        if (sheaf->size == 0)
>> >> +                goto out;
>> >> +
>> >> +        ret = sheaf->objects[--sheaf->size];
>> >> +
>> >> +        init = slab_want_init_on_alloc(gfp, s);
>> >> +
>> >> +        /* add __GFP_NOFAIL to force successful memcg charging */
>> >> +        slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size);
>> >
>> > Maybe I'm missing something, but how can this be used for non-sleepable contexts
>> > if __GFP_NOFAIL is used? I think we have to charge them when the sheaf
>>
>> AFAIK it forces memcg to simply charge even if allocated memory goes over
>> the memcg limit. So there's no issue with a non-sleepable context, there
>> shouldn't be memcg reclaim happening in that case.
>
> Ok, but I am still worried about mem alloc profiling/memcg trying to
> allocate some memory
> with __GFP_NOFAIL flag and eventually passing it to the buddy allocator,
> which does not want __GFP_NOFAIL without __GFP_DIRECT_RECLAIM?
> e.g.) memcg hook calls
> alloc_slab_obj_exts()->kcalloc_node()->....->alloc_pages()

alloc_slab_obj_exts() removes __GFP_NOFAIL via OBJCGS_CLEAR_MASK so that's
fine. I think kmemleak_alloc_recursive() is also fine as it ends up in
mem_pool_alloc() and will clear __GFP_NOFAIL via gfp_nested_mask().
Hope I'm not missing something else.

>> > is returned
>> > via kmem_cache_prefill_sheaf(), just like users of bulk alloc/free?
>>
>> That would be very costly to charge/uncharge if most of the objects are not
>> actually used - it's what we want to avoid here.
>> Going over the memcgs limit a bit in a very rare case isn't considered such
>> an issue, for example Linus advocated such approach too in another context.
>
> Thanks for the explanation! That was a point I was missing.
>
>> > Best,
>> > Hyeonggon
>> >
>> >> +out:
>> >> +        trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE);
>> >> +
>> >> +        return ret;
>> >> +}
>> >> +
>> >>  /*
>> >>   * To avoid unnecessary overhead, we pass through large allocation requests
>> >>   * directly to the page allocator. We use __GFP_COMP, because we will need to
>> >>
>> >> --
>> >> 2.47.0
>> >>
>>
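In caller terms, the gfp semantics settled in the thread above look roughly like this (an illustrative sketch only; obj_cache, sheaf and struct my_obj are hypothetical, and only kmem_cache_alloc_from_sheaf() comes from the patch): the gfp passed to the per-object allocation carries just __GFP_ZERO or __GFP_ACCOUNT, and since __GFP_NOFAIL is OR'd in internally, any memcg charge is forced through (possibly over the limit) rather than failing or entering reclaim, so the call remains safe in a non-sleepable section.

        /*
         * Inside a non-sleepable critical section; obj_cache and sheaf
         * were set up earlier by hypothetical prefill code.
         */
        struct my_obj *obj;

        obj = kmem_cache_alloc_from_sheaf(obj_cache, __GFP_ACCOUNT | __GFP_ZERO, sheaf);
        /*
         * No NULL check needed while fewer objects than the prefilled
         * count have been taken; memcg charging cannot sleep or fail here.
         */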
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 23904321992ad2eeb9389d0883cf4d5d5d71d896..a87dc3c6392fe235de2eabe1792df86d40c3bbf9 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -820,6 +820,17 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
                                            int node) __assume_slab_alignment __malloc;
 #define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
 
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count);
+
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+                             struct slab_sheaf *sheaf);
+
+void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
+                        struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_from_sheaf(...) \
+                alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
+
 /*
  * These macros allow declaring a kmem_buckets * parameter alongside size, which
  * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
diff --git a/mm/slub.c b/mm/slub.c
index 1900afa6153ca6d88f9df7db3ce84d98629489e7..a0e2cb7dfb5173f39f36bea1eb9760c3c1b99dd7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -444,6 +444,7 @@ struct slab_sheaf {
         union {
                 struct rcu_head rcu_head;
                 struct list_head barn_list;
+                bool oversize;
         };
         struct kmem_cache *cache;
         unsigned int size;
@@ -2819,6 +2820,30 @@ static int barn_put_full_sheaf(struct node_barn *barn, struct slab_sheaf *sheaf,
         return ret;
 }
 
+static struct slab_sheaf *barn_get_full_or_empty_sheaf(struct node_barn *barn)
+{
+        struct slab_sheaf *sheaf = NULL;
+        unsigned long flags;
+
+        spin_lock_irqsave(&barn->lock, flags);
+
+        if (barn->nr_empty) {
+                sheaf = list_first_entry(&barn->sheaves_empty,
+                                         struct slab_sheaf, barn_list);
+                list_del(&sheaf->barn_list);
+                barn->nr_empty--;
+        } else if (barn->nr_full) {
+                sheaf = list_first_entry(&barn->sheaves_full, struct slab_sheaf,
+                                         barn_list);
+                list_del(&sheaf->barn_list);
+                barn->nr_full--;
+        }
+
+        spin_unlock_irqrestore(&barn->lock, flags);
+
+        return sheaf;
+}
+
 /*
  * If a full sheaf is available, return it and put the supplied empty one to
  * barn. We ignore the limit on empty sheaves as the number of sheaves doesn't
@@ -4893,6 +4918,130 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int nod
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
 
+
+/*
+ * returns a sheaf that has least the given count of objects
+ * when prefilling is needed, do so with given gfp flags
+ *
+ * return NULL if prefilling failed, or when the requested count is
+ * above cache's sheaf_capacity (TODO: lift this limitation)
+ */
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int count)
+{
+        struct slub_percpu_sheaves *pcs;
+        struct slab_sheaf *sheaf = NULL;
+
+        //TODO: handle via oversize sheaf
+        if (count > s->sheaf_capacity)
+                return NULL;
+
+        pcs = cpu_sheaves_lock(s->cpu_sheaves);
+
+        if (pcs->spare && pcs->spare->size > 0) {
+                sheaf = pcs->spare;
+                pcs->spare = NULL;
+        }
+
+        if (!sheaf)
+                sheaf = barn_get_full_or_empty_sheaf(pcs->barn);
+
+        cpu_sheaves_unlock(s->cpu_sheaves);
+
+        if (!sheaf)
+                sheaf = alloc_empty_sheaf(s, gfp);
+
+        if (sheaf && sheaf->size < count) {
+                if (refill_sheaf(s, sheaf, gfp)) {
+                        sheaf_flush(s, sheaf);
+                        free_empty_sheaf(s, sheaf);
+                        sheaf = NULL;
+                }
+        }
+
+        return sheaf;
+}
+
+/*
+ * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf()
+ * It tries to refill the sheaf back to the cache's sheaf_capacity
+ * to avoid handling partially full sheaves.
+ *
+ * If the refill fails because gfp is e.g. GFP_NOWAIT, the sheaf is
+ * instead dissolved
+ */
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+                             struct slab_sheaf *sheaf)
+{
+        struct slub_percpu_sheaves *pcs;
+        bool refill = false;
+        struct node_barn *barn;
+
+        //TODO: handle oversize sheaf
+
+        pcs = cpu_sheaves_lock(s->cpu_sheaves);
+
+        if (!pcs->spare) {
+                pcs->spare = sheaf;
+                sheaf = NULL;
+        }
+
+        /* racy check */
+        if (!sheaf && pcs->barn->nr_full >= MAX_FULL_SHEAVES) {
+                barn = pcs->barn;
+                refill = true;
+        }
+
+        cpu_sheaves_unlock(s->cpu_sheaves);
+
+        if (!sheaf)
+                return;
+
+        /*
+         * if the barn is full of full sheaves or we fail to refill the sheaf,
+         * simply flush and free it
+         */
+        if (!refill || refill_sheaf(s, sheaf, gfp)) {
+                sheaf_flush(s, sheaf);
+                free_empty_sheaf(s, sheaf);
+                return;
+        }
+
+        /* we racily determined the sheaf would fit, so now force it */
+        barn_put_full_sheaf(barn, sheaf, true);
+}
+
+/*
+ * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf()
+ *
+ * Guaranteed not to fail as many allocations as was the requested count.
+ * After the sheaf is emptied, it fails - no fallback to the slab cache itself.
+ *
+ * The gfp parameter is meant only to specify __GFP_ZERO or __GFP_ACCOUNT
+ * memcg charging is forced over limit if necessary, to avoid failure.
+ */
+void *
+kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *s, gfp_t gfp,
+                        struct slab_sheaf *sheaf)
+{
+        void *ret = NULL;
+        bool init;
+
+        if (sheaf->size == 0)
+                goto out;
+
+        ret = sheaf->objects[--sheaf->size];
+
+        init = slab_want_init_on_alloc(gfp, s);
+
+        /* add __GFP_NOFAIL to force successful memcg charging */
+        slab_post_alloc_hook(s, NULL, gfp | __GFP_NOFAIL, 1, &ret, init, s->object_size);
+out:
+        trace_kmem_cache_alloc(_RET_IP_, ret, s, gfp, NUMA_NO_NODE);
+
+        return ret;
+}
+
 /*
  * To avoid unnecessary overhead, we pass through large allocation requests
  * directly to the page allocator. We use __GFP_COMP, because we will need to
Add three functions for efficient guaranteed allocations in a critical
section (that cannot sleep) when the exact number of allocations is not
known beforehand, but an upper limit can be calculated.

kmem_cache_prefill_sheaf() returns a sheaf containing at least the given
number of objects.

kmem_cache_alloc_from_sheaf() will allocate an object from the sheaf
and is guaranteed not to fail until it is depleted.

kmem_cache_return_sheaf() is for giving the sheaf back to the slab
allocator after the critical section. This will also attempt to refill
it to the cache's sheaf capacity for better efficiency of sheaves handling,
but it's not strictly necessary to succeed.

TODO: the current implementation is limited to the cache's sheaf_capacity

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/slab.h |  11 ++++
 mm/slub.c            | 149 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 160 insertions(+)
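For illustration, here is a minimal caller-side sketch of how the three functions added by this patch are meant to compose. The cache, tree structure, lock and helpers (node_cache, struct my_tree, tree_needs_node(), tree_add_node(), max_nodes) are hypothetical placeholders; only the kmem_cache_*_sheaf() calls come from the patch.

static int insert_nodes_atomically(struct kmem_cache *node_cache,
                                   struct my_tree *tree, unsigned int max_nodes)
{
        struct slab_sheaf *sheaf;

        /*
         * Sleepable context: prefill up to the calculated upper bound.
         * With the current patch this fails if max_nodes exceeds the
         * cache's sheaf_capacity, or if the prefill itself fails.
         */
        sheaf = kmem_cache_prefill_sheaf(node_cache, GFP_KERNEL, max_nodes);
        if (!sheaf)
                return -ENOMEM;

        spin_lock(&tree->lock);         /* critical section, cannot sleep */
        /* by construction the loop runs at most max_nodes times */
        while (tree_needs_node(tree)) {
                /*
                 * Guaranteed to succeed for up to max_nodes allocations;
                 * the gfp here only carries __GFP_ZERO / __GFP_ACCOUNT.
                 */
                void *node = kmem_cache_alloc_from_sheaf(node_cache, __GFP_ZERO, sheaf);

                tree_add_node(tree, node);
        }
        spin_unlock(&tree->lock);

        /*
         * Sleepable again: hand the sheaf back; refilling it to capacity
         * is best effort and the sheaf is dissolved if that fails.
         */
        kmem_cache_return_sheaf(node_cache, GFP_KERNEL, sheaf);
        return 0;
}

The prefill and return calls may sleep with GFP_KERNEL, while the allocations in between never fall back to the slab caches and therefore never sleep, which is the point of the API.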