@@ -559,6 +559,7 @@ void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
 		raw_spin_lock_nested(&rq->__lock, subclass);
 		/* preempt_count *MUST* be > 1 */
 		preempt_enable_no_resched();
+		printk_deferred_enter();
 		return;
 	}
 
@@ -568,6 +569,7 @@ void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
 		if (likely(lock == __rq_lockp(rq))) {
 			/* preempt_count *MUST* be > 1 */
 			preempt_enable_no_resched();
+			printk_deferred_enter();
 			return;
 		}
 		raw_spin_unlock(lock);
@@ -584,6 +586,8 @@ bool raw_spin_rq_trylock(struct rq *rq)
 	if (sched_core_disabled()) {
 		ret = raw_spin_trylock(&rq->__lock);
 		preempt_enable();
+		if (ret)
+			printk_deferred_enter();
 		return ret;
 	}
 
@@ -592,6 +596,8 @@ bool raw_spin_rq_trylock(struct rq *rq)
ret = raw_spin_trylock(lock);
if (!ret || (likely(lock == __rq_lockp(rq)))) {
preempt_enable();
+ if (ret)
+ printk_deferred_enter();
return ret;
}
raw_spin_unlock(lock);
@@ -600,6 +606,7 @@ bool raw_spin_rq_trylock(struct rq *rq)
 
 void raw_spin_rq_unlock(struct rq *rq)
 {
+	printk_deferred_exit();
 	raw_spin_unlock(rq_lockp(rq));
 }
syzbot is reporting a circular locking dependency inside __bpf_prog_run() when the trace_sched_switch() hook is called from __schedule(), because fault injection calls printk() while the rq lock is already held. Since any debugging functionality (lockdep, fault injection, KASAN/KCSAN/KMSAN, etc.) might call printk(), guard the whole section between raw_spin_rq_{lock,lock_nested,trylock}() and raw_spin_rq_unlock() using printk_deferred_{enter,exit}().

Reported-by: syzbot <syzbot+18cfb7f63482af8641df@syzkaller.appspotmail.com>
Closes: https://syzkaller.appspot.com/bug?extid=18cfb7f63482af8641df
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
---
This is a repost of https://lkml.kernel.org/r/b55e5f24-01ad-4a3d-94dc-e8a6bc15ac42@I-love.SAKURA.ne.jp .

Scheduler developers, can you agree with addressing this problem at the locations where the rq lock is held?

 kernel/sched/core.c | 7 +++++++
 1 file changed, 7 insertions(+)
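For reference, here is a minimal self-contained sketch of the pattern this patch applies; the lock and function names (example_lock, example_guarded_section) are hypothetical and purely illustrative, not part of the patch. The idea is to enter the deferred-printk section right after taking a lock that console drivers might depend on, and to leave it just before releasing the lock, so that any printk() issued inside the critical section only queues its message and the output is flushed later via irq_work:

/* Illustrative sketch only -- example_lock and example_guarded_section()
 * are hypothetical names, not part of this patch. */
#include <linux/printk.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_guarded_section(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	printk_deferred_enter();	/* printk() now only queues messages */

	/*
	 * Debugging code (lockdep, fault injection, KASAN/KCSAN/KMSAN, ...)
	 * may call printk() here without taking console-related locks
	 * underneath example_lock.
	 */

	printk_deferred_exit();		/* queued output is flushed from irq_work */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

Note the asymmetry in the trylock path of the patch above: raw_spin_rq_trylock() calls printk_deferred_enter() only when the lock was actually acquired (if (ret)), so that the printk_deferred_exit() in raw_spin_rq_unlock() always pairs with exactly one enter.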