@@ -423,6 +423,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
bool ret = true;
+ int loop = 0;
rcu_read_lock();
while (__mutex_owner(lock) == owner) {
@@ -436,9 +437,11 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
/*
* Use vcpu_is_preempted to detect lock holder preemption issue.
+ * As vcpu_is_preempted is more costly to use, it is called at
+ * a reduced frequency (once every 256 iterations).
*/
if (!owner->on_cpu || need_resched() ||
- vcpu_is_preempted(task_cpu(owner))) {
+ (!(++loop & 0xff) && vcpu_is_preempted(task_cpu(owner)))) {
ret = false;
break;
}
@@ -351,6 +351,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
struct task_struct *owner = READ_ONCE(sem->owner);
+ int loop = 0;
if (!rwsem_owner_is_writer(owner))
goto out;
@@ -367,10 +368,11 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
/*
* abort spinning when need_resched or owner is not running or
- * owner's cpu is preempted.
+ * owner's cpu is preempted. The preemption check is done at
+ * a lower frequency because of its high cost.
*/
if (!owner->on_cpu || need_resched() ||
- vcpu_is_preempted(task_cpu(owner))) {
+ (!(++loop & 0xff) && vcpu_is_preempted(task_cpu(owner)))) {
rcu_read_unlock();
return false;
}
As the vcpu_is_preempted() call is pretty costly compared with the other
checks within mutex_spin_on_owner() and rwsem_spin_on_owner(), it is done
at a reduced frequency of once every 256 iterations.

Signed-off-by: Waiman Long <longman@redhat.com>
---
 kernel/locking/mutex.c      | 5 ++++-
 kernel/locking/rwsem-xadd.c | 6 ++++--
 2 files changed, 8 insertions(+), 3 deletions(-)
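For illustration, here is a minimal user-space sketch of the same throttling pattern used in both hunks above: cheap exit conditions are evaluated every iteration, while a costly check runs only once every 256 iterations via a counter and bitmask. The helpers expensive_check() and cheap_check() are hypothetical stand-ins for vcpu_is_preempted() and the per-iteration owner/need_resched() tests; they are not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a costly check such as vcpu_is_preempted(). */
static bool expensive_check(void)
{
	return false;	/* pretend the owner's vCPU is never preempted */
}

/* Hypothetical stand-in for the cheap per-iteration exit conditions. */
static bool cheap_check(int i)
{
	return i >= 100000;	/* give up after a fixed number of spins */
}

int main(void)
{
	int loop = 0;
	int i = 0;

	for (;;) {
		/*
		 * The cheap checks run on every iteration; the expensive one
		 * only when the low 8 bits of the counter are zero, i.e. once
		 * every 256 iterations -- the same shape as
		 * "!(++loop & 0xff) && vcpu_is_preempted(task_cpu(owner))".
		 */
		if (cheap_check(i) ||
		    (!(++loop & 0xff) && expensive_check()))
			break;
		i++;
	}

	printf("stopped spinning after %d iterations\n", i);
	return 0;
}

Because of C's short-circuit evaluation, expensive_check() is skipped entirely on the 255 out of 256 iterations where the mask test fails, which is where the cost saving comes from.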