@@ -351,8 +351,8 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
-extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
-extern void lockdep_clear_current_reclaim_state(void);
+extern gfp_t lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_restore_current_reclaim_state(gfp_t old_mask);
extern void lockdep_trace_alloc(gfp_t mask);
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
@@ -379,8 +379,8 @@ static inline void lockdep_on(void)
# define lock_release(l, n, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
-# define lockdep_set_current_reclaim_state(g) do { } while (0)
-# define lockdep_clear_current_reclaim_state() do { } while (0)
+# define lockdep_set_current_reclaim_state(g) (0)
+# define lockdep_restore_current_reclaim_state(g) do { } while (0)
# define lockdep_trace_alloc(g) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
@@ -3645,14 +3645,16 @@ int lock_is_held(struct lockdep_map *lock)
}
EXPORT_SYMBOL_GPL(lock_is_held);
-void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+gfp_t lockdep_set_current_reclaim_state(gfp_t gfp_mask)
{
+ gfp_t old = current->lockdep_reclaim_gfp;
current->lockdep_reclaim_gfp = gfp_mask;
+ return old;
}
-void lockdep_clear_current_reclaim_state(void)
+void lockdep_restore_current_reclaim_state(gfp_t gfp_mask)
{
- current->lockdep_reclaim_gfp = 0;
+ current->lockdep_reclaim_gfp = gfp_mask;
}
#ifdef CONFIG_LOCK_STAT
@@ -2327,20 +2327,21 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
struct reclaim_state reclaim_state;
int progress;
unsigned int pflags;
+ gfp_t old_mask;
cond_resched();
/* We now go into synchronous reclaim */
cpuset_memory_pressure_bump();
current_set_flags_nested(&pflags, PF_MEMALLOC);
- lockdep_set_current_reclaim_state(gfp_mask);
+ old_mask = lockdep_set_current_reclaim_state(gfp_mask);
reclaim_state.reclaimed_slab = 0;
current->reclaim_state = &reclaim_state;
progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
current->reclaim_state = NULL;
- lockdep_clear_current_reclaim_state();
+ lockdep_restore_current_reclaim_state(old_mask);
current_restore_flags_nested(&pflags, PF_MEMALLOC);
cond_resched();
@@ -3344,16 +3344,17 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
struct task_struct *p = current;
unsigned long nr_reclaimed;
unsigned int pflags;
+ gfp_t old_mask;
current_set_flags_nested(&pflags, PF_MEMALLOC);
- lockdep_set_current_reclaim_state(sc.gfp_mask);
+ old_mask = lockdep_set_current_reclaim_state(sc.gfp_mask);
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
p->reclaim_state = NULL;
- lockdep_clear_current_reclaim_state();
+ lockdep_restore_current_reclaim_state(old_mask);
current_restore_flags_nested(&pflags, PF_MEMALLOC);
return nr_reclaimed;
@@ -3532,6 +3533,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
};
unsigned long nr_slab_pages0, nr_slab_pages1;
unsigned int pflags;
+ gfp_t old_mask;
cond_resched();
/*
@@ -3540,7 +3542,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
* and RECLAIM_SWAP.
*/
current_set_flags_nested(&pflags, PF_MEMALLOC | PF_SWAPWRITE);
- lockdep_set_current_reclaim_state(gfp_mask);
+ old_mask = lockdep_set_current_reclaim_state(gfp_mask);
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
@@ -3590,7 +3592,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
p->reclaim_state = NULL;
current_restore_flags_nested(&pflags, PF_MEMALLOC | PF_SWAPWRITE);
- lockdep_clear_current_reclaim_state();
+ lockdep_restore_current_reclaim_state(old_mask);
return sc.nr_reclaimed >= nr_pages;
}
Currently kswapd sets current->lockdep_reclaim_gfp, but the first memory
allocation call will clear it, so the setting does no good: the
lockdep_set_current_reclaim_state() call in kswapd() is ineffective.

With this patch we always save the old value and then restore it, so
lockdep gets to properly check the locks that kswapd takes.

Signed-off-by: NeilBrown <neilb@suse.de>
---
 include/linux/lockdep.h  |  8 ++++----
 kernel/locking/lockdep.c |  8 +++++---
 mm/page_alloc.c          |  5 +++--
 mm/vmscan.c              | 10 ++++++----
 4 files changed, 18 insertions(+), 13 deletions(-)
--
To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html