diff mbox series

[RFC,55/86] xarray: add cond_resched_xas_rcu() and cond_resched_xas_lock_irq()

Message ID 20231107215742.363031-56-ankur.a.arora@oracle.com (mailing list archive)
State New
Headers show
Series Make the kernel preemptible | expand

Commit Message

Ankur Arora Nov. 7, 2023, 9:57 p.m. UTC
The xarray code has a common open-coded pattern: pause the walk,
release a lock and/or re-enable IRQs (allowing rescheduling to
happen), and then reacquire the resource.

Add helpers to do that. Also remove the cond_resched() call which,
with always-on CONFIG_PREEMPTION, is not needed anymore.

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 include/linux/xarray.h | 14 ++++++++++++++
 kernel/sched/core.c    | 17 +++++++++++++++++
 2 files changed, 31 insertions(+)
diff mbox series

Patch

diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index cb571dfcf4b1..30b1181219a3 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1883,4 +1883,18 @@  static inline void *xas_next(struct xa_state *xas)
 	return xa_entry(xas->xa, node, xas->xa_offset);
 }
 
+/**
+ * cond_resched_xas_rcu - reschedule point for xarray RCU walks
+ * @xas: xarray operation state; paused so the walk can restart
+ * safely after the RCU read-side critical section is dropped
+ * (potentially rescheduling) and then reentered.
+ */
+static inline void cond_resched_xas_rcu(struct xa_state *xas)
+{
+	if (need_resched()) {
+		xas_pause(xas);
+		cond_resched_rcu();
+	}
+}
+extern void cond_resched_xas_lock_irq(struct xa_state *xas);
+
 #endif /* _LINUX_XARRAY_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ea00e8489ebb..3467a3a7d4bf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8664,6 +8664,23 @@  int __cond_resched_stall(void)
 }
 EXPORT_SYMBOL(__cond_resched_stall);
 
+/**
+ * cond_resched_xas_lock_irq - reschedule point under xa_lock_irq
+ * @xas: xarray operation state; paused before the lock is dropped
+ * so the walk can be restarted safely after reacquisition.
+ */
+void cond_resched_xas_lock_irq(struct xa_state *xas)
+{
+	lockdep_assert_irqs_disabled();
+
+	xas_pause(xas);
+	xas_unlock_irq(xas);
+
+	__might_resched(__FILE__, __LINE__, 0);
+
+	xas_lock_irq(xas);
+}
+EXPORT_SYMBOL(cond_resched_xas_lock_irq);
+
 /**
  * yield - yield the current processor to other threads.
  *