@@ -112,13 +112,13 @@ static inline int slob_page_free(struct page *sp)
 
 static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-	list_add(&sp->lru, list);
+	list_add(&sp->slab_list, list);
 	__SetPageSlobFree(sp);
 }
 
 static inline void clear_slob_page_free(struct page *sp)
 {
-	list_del(&sp->lru);
+	list_del(&sp->slab_list);
 	__ClearPageSlobFree(sp);
 }
 
@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, slob_list, lru) {
+	list_for_each_entry(sp, slob_list, slab_list) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -299,22 +299,22 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		 * Cache previous entry because slob_page_alloc() may
 		 * remove sp from slob_list.
 		 */
-		prev = list_prev_entry(sp, lru);
+		prev = list_prev_entry(sp, slab_list);
 
 		/* Attempt to alloc */
 		b = slob_page_alloc(sp, size, align);
 		if (!b)
 			continue;
 
-		next = list_next_entry(prev, lru); /* This may or may not be sp */
+		next = list_next_entry(prev, slab_list); /* This may or may not be sp */
 
 		/*
 		 * Improve fragment distribution and reduce our average
 		 * search time by starting our next search here. (see
 		 * Knuth vol 1, sec 2.5, pg 449)
 		 */
-		if (!list_is_first(&next->lru, slob_list))
-			list_rotate_to_front(&next->lru, slob_list);
+		if (!list_is_first(&next->slab_list, slob_list))
+			list_rotate_to_front(&next->slab_list, slob_list);
 
 		break;
 	}
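Two details in this hunk are worth spelling out. prev is cached before slob_page_alloc() runs because a successful allocation may remove sp from slob_list; next is then derived from prev, so it is valid whether or not sp is still linked. list_rotate_to_front(entry, head) then rotates the circular list so that entry becomes the first element after head, implementing the "start the next search here" heuristic. Below is a self-contained userspace sketch of that rotation; list_head, list_is_first() and list_rotate_to_front() here are minimal re-implementations for illustration, not the kernel's <linux/list.h>.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void __list_add(struct list_head *n, struct list_head *prev,
		       struct list_head *next)
{
	next->prev = n;
	n->next = next;
	n->prev = prev;
	prev->next = n;
}

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	__list_add(n, head->prev, head);
}

static void list_del(struct list_head *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static int list_is_first(const struct list_head *list, const struct list_head *head)
{
	return list->prev == head;
}

/* Rotate so that @list becomes the first entry after @head, by moving the
 * head sentinel to sit just before @list. */
static void list_rotate_to_front(struct list_head *list, struct list_head *head)
{
	list_del(head);
	__list_add(head, list->prev, list);
}

struct item { int id; struct list_head node; };

int main(void)
{
	struct list_head head;
	struct item items[4];
	struct list_head *pos;

	INIT_LIST_HEAD(&head);
	for (int i = 0; i < 4; i++) {
		items[i].id = i;
		list_add_tail(&items[i].node, &head);	/* list is 0 1 2 3 */
	}

	/* Start the next search at item 2, as slob_alloc() does with "next". */
	if (!list_is_first(&items[2].node, &head))
		list_rotate_to_front(&items[2].node, &head);

	for (pos = head.next; pos != &head; pos = pos->next) {
		struct item *it = (struct item *)((char *)pos - offsetof(struct item, node));
		printf("%d ", it->id);
	}
	printf("\n");	/* prints: 2 3 0 1 */
	return 0;
}

The in-kernel helper does the equivalent by moving the list head itself (list_move_tail(head, list)), so the relative order of the remaining entries is preserved.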
@@ -331,7 +331,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
 		sp->freelist = b;
-		INIT_LIST_HEAD(&sp->lru);
+		INIT_LIST_HEAD(&sp->slab_list);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
 		b = slob_page_alloc(sp, size, align);
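A note on the numbers in this last hunk: SLOB_UNITS(PAGE_SIZE) expresses the whole page in SLOB's internal allocation units before the fresh page is published on slob_list via set_slob_page_free(). As a rough worked example, assuming the usual mm/slob.c definitions where a unit is sizeof(slob_t), i.e. 2 bytes on configurations with PAGE_SIZE <= 32767 * 2:

/* Illustrative values only, mirroring the usual mm/slob.c definitions. */
#define SLOB_UNIT	2				/* sizeof(slob_t) on small-page configs */
#define SLOB_UNITS(size)	(((size) + SLOB_UNIT - 1) / SLOB_UNIT)

With 4096-byte pages that gives SLOB_UNITS(4096) == 2048, so sp->units starts at 2048 free units and set_slob() installs a single free block spanning the entire page.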