@@ -89,6 +89,8 @@ struct rcu_data {
/* 3) idle CPUs handling */
struct timer idle_timer;
bool idle_timer_active;
+
+ bool process_callbacks;
};
/*
@@ -194,7 +196,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
struct rcu_ctrlblk *rcp)
{
cpumask_t cpumask;
- raise_softirq(SCHEDULE_SOFTIRQ);
+ raise_softirq(RCU_SOFTIRQ);
if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
rdp->last_rs_qlen = rdp->qlen;
/*
@@ -202,7 +204,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
* rdp->cpu is the current cpu.
*/
cpumask_andnot(&cpumask, &rcp->cpumask, cpumask_of(rdp->cpu));
- cpumask_raise_softirq(&cpumask, SCHEDULE_SOFTIRQ);
+ cpumask_raise_softirq(&cpumask, RCU_SOFTIRQ);
}
}
@@ -259,7 +261,10 @@ static void rcu_do_batch(struct rcu_data *rdp)
if (!rdp->donelist)
rdp->donetail = &rdp->donelist;
else
+ {
+ rdp->process_callbacks = true;
raise_softirq(RCU_SOFTIRQ);
+ }
}
/*
@@ -410,7 +415,13 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
static void rcu_process_callbacks(void)
{
- __rcu_process_callbacks(&rcu_ctrlblk, &this_cpu(rcu_data));
+ struct rcu_data *rdp = &this_cpu(rcu_data);
+
+ if ( rdp->process_callbacks )
+ {
+ rdp->process_callbacks = false;
+ __rcu_process_callbacks(&rcu_ctrlblk, rdp);
+ }
}
static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
@@ -518,6 +529,9 @@ static void rcu_idle_timer_handler(void* data)
void rcu_check_callbacks(int cpu)
{
+ struct rcu_data *rdp = &this_cpu(rcu_data);
+
+ rdp->process_callbacks = true;
raise_softirq(RCU_SOFTIRQ);
}
As RCU callbacks are processed in __do_softirq(), there is no need to use the scheduling softirq for forcing quiescent state: any other softirq would do the job, and the scheduling one is the most expensive. So use the already existing RCU softirq for that purpose. To tell apart why the RCU softirq was raised, add a flag indicating the current usage. Signed-off-by: Juergen Gross <jgross@suse.com> --- xen/common/rcupdate.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-)