@@ -397,10 +397,6 @@ static bool rcu_kick_kthreads;
 static int rcu_divisor = 7;
 module_param(rcu_divisor, int, 0644);
 
-/* Force an exit from rcu_do_batch() after 3 milliseconds. */
-static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
-module_param(rcu_resched_ns, long, 0644);
-
 /*
  * How long the grace period must be before we start recruiting
  * quiescent-state help from rcu_note_context_switch().
@@ -2050,7 +2046,7 @@ rcu_check_quiescent_state(struct rcu_data *rdp)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period. Throttle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct softirq_action *h, struct rcu_data *rdp)
 {
 	int div;
 	bool __maybe_unused empty;
@@ -2058,7 +2054,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	struct rcu_head *rhp;
 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 	long bl, count = 0;
-	long pending, tlimit = 0;
+	long pending;
 
 	/* If no callbacks are ready, just return. */
 	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
@@ -2082,12 +2078,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	div = READ_ONCE(rcu_divisor);
 	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
 	bl = max(rdp->blimit, pending >> div);
-	if (in_serving_softirq() && unlikely(bl > 100)) {
-		long rrn = READ_ONCE(rcu_resched_ns);
-
-		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
-		tlimit = local_clock() + rrn;
-	}
 	trace_rcu_batch_start(rcu_state.name,
 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
@@ -2126,13 +2116,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
 			 * Make sure we don't spend too much time here and deprive other
 			 * softirq vectors of CPU cycles.
 			 */
-			if (unlikely(tlimit)) {
-				/* only call local_clock() every 32 callbacks */
-				if (likely((count & 31) || local_clock() < tlimit))
-					continue;
-				/* Exceeded the time limit, so leave. */
+			if (unlikely(!(count & 31)) && softirq_needs_break(h))
 				break;
-			}
 		} else {
 			// In rcuoc context, so no worries about depriving
 			// other softirq vectors of CPU cycles.
@@ -2320,7 +2305,7 @@ static void strict_work_handler(struct work_struct *work)
 }
 
 /* Perform RCU core processing work for the current CPU. */
-static __latent_entropy void rcu_core(void)
+static __latent_entropy void rcu_core(struct softirq_action *h)
 {
 	unsigned long flags;
 	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
@@ -2374,7 +2359,7 @@ static __latent_entropy void rcu_core(void)
 	/* If there are callbacks ready, invoke them. */
 	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
 	    likely(READ_ONCE(rcu_scheduler_fully_active))) {
-		rcu_do_batch(rdp);
+		rcu_do_batch(h, rdp);
 		/* Re-invoke RCU core processing if there are callbacks remaining. */
 		if (rcu_segcblist_ready_cbs(&rdp->cblist))
 			invoke_rcu_core();
@@ -2391,7 +2376,7 @@ static __latent_entropy void rcu_core(void)
 
 static void rcu_core_si(struct softirq_action *h)
 {
-	rcu_core();
+	rcu_core(h);
 }
 
 static void rcu_wake_cond(struct task_struct *t, int status)
@@ -2462,7 +2447,7 @@ static void rcu_cpu_kthread(unsigned int cpu)
 		*workp = 0;
 		local_irq_enable();
 		if (work)
-			rcu_core();
+			rcu_core(NULL);
 		local_bh_enable();
 		if (*workp == 0) {
 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
@@ -951,7 +951,7 @@ static void nocb_cb_wait(struct rcu_data *rdp)
 	 * instances of this callback would execute concurrently.
 	 */
 	local_bh_disable();
-	rcu_do_batch(rdp);
+	rcu_do_batch(NULL, rdp);
 	local_bh_enable();
 	lockdep_assert_irqs_enabled();
 	rcu_nocb_lock_irqsave(rdp, flags);
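
Note (not part of the patch): the hunks above only assume that softirq_needs_break() takes the softirq_action pointer handed to the vector and returns true once the handler should stop so remaining work can be deferred; the helper itself comes from the softirq core changes this patch depends on. A minimal, purely illustrative sketch of that assumed contract:

static inline bool softirq_needs_break(struct softirq_action *h)
{
	/*
	 * Illustrative body only: yield when a reschedule is pending.
	 * The real helper may also enforce a per-invocation time budget,
	 * which is what lets rcu_do_batch() drop its own tlimit tracking.
	 */
	return need_resched();
}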