@@ -149,7 +149,7 @@ This case is handled by calls to the strongly ordered
``atomic_add_return()`` read-modify-write atomic operation that
is invoked within ``rcu_dynticks_eqs_enter()`` at idle-entry
time and within ``rcu_dynticks_eqs_exit()`` at idle-exit time.
-The grace-period kthread invokes first ``ct_dynticks_cpu_acquire()``
+The grace-period kthread invokes first ``ct_rcu_watching_cpu_acquire()``
(preceded by a full memory barrier) and ``rcu_dynticks_in_eqs_since()``
(both of which rely on acquire semantics) to detect idle CPUs.
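To make the ordering argument concrete, here is a minimal, self-contained user-space sketch of the counter protocol in C11 atomics. It is illustrative only: every name below is invented, the even/odd convention is a simplification, and atomic_fetch_add_explicit() plus an acquire load stand in for the kernel's fully ordered atomic_add_return() and the acquire read inside ct_rcu_watching_cpu_acquire().

#include <stdatomic.h>
#include <stdbool.h>

/* even: CPU idle (extended quiescent state, EQS); odd: CPU non-idle */
static _Atomic unsigned int dynticks;

/* Idle entry and exit each do one fully ordered increment, mirroring
 * atomic_add_return() in rcu_dynticks_eqs_enter()/_exit(). */
static unsigned int eqs_transition(void)
{
	return atomic_fetch_add_explicit(&dynticks, 1,
					 memory_order_seq_cst) + 1;
}

/* Grace-period side: acquire-load a snapshot of the remote counter,
 * mirroring ct_rcu_watching_cpu_acquire(). The acquire pairs with the
 * fully ordered increment on the idle path. */
static unsigned int snapshot_acquire(void)
{
	return atomic_load_explicit(&dynticks, memory_order_acquire);
}

/* Was the CPU idle when the snapshot was taken? */
static bool in_eqs(unsigned int snap)
{
	return !(snap & 0x1);
}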
@@ -68,7 +68,7 @@ static __always_inline int ct_rcu_watching_cpu(int cpu)
return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
}

-static __always_inline int ct_dynticks_cpu_acquire(int cpu)
+static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
@@ -323,7 +323,7 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
* performed by the remote CPU prior to entering idle and therefore can
* rely solely on acquire semantics.
*/
- return snap != ct_dynticks_cpu_acquire(rdp->cpu);
+ return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
}

/*
@@ -782,7 +782,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
* Ordering between remote CPU's pre idle accesses and post grace period
* updater's accesses is enforced by the below acquire semantic.
*/
- rdp->dynticks_snap = ct_dynticks_cpu_acquire(rdp->cpu);
+ rdp->dynticks_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rdp->mynode, rdp);
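Building on the invented helpers in the sketch above, the two-pass flow that this hunk and the rcu_dynticks_in_eqs_since() hunk implement can be sketched as: take an acquire snapshot and report a quiescent state if the CPU is already idle; otherwise recheck later, where any counter movement means the CPU passed through idle in the meantime.

/* First pass, as in dyntick_save_progress_counter(): record an acquire
 * snapshot and report a quiescent state if the CPU is already idle. */
static unsigned int saved_snap;

static bool first_pass_qs(void)
{
	saved_snap = snapshot_acquire();
	return in_eqs(saved_snap);
}

/* Later pass, as in rcu_dynticks_in_eqs_since(): the counter having
 * moved means the CPU passed through idle since the snapshot. */
static bool later_pass_qs(void)
{
	return saved_snap != snapshot_acquire();
}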
@@ -376,7 +376,7 @@ static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
* post grace period updater's accesses is enforced by the
* below acquire semantic.
*/
- snap = ct_dynticks_cpu_acquire(cpu);
+ snap = ct_rcu_watching_cpu_acquire(cpu);
if (rcu_dynticks_in_eqs(snap))
mask_ofl_test |= mask;
else
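The expedited path applies the same acquire snapshot to a set of CPUs at once. A rough sketch, assuming a hypothetical per-CPU variant snapshot_acquire_cpu() of the helper above and a flat bitmask in place of the kernel's rcu_node ->expmask handling:

#define SKETCH_NR_CPUS 64

static unsigned int per_cpu_snap[SKETCH_NR_CPUS];
static unsigned int snapshot_acquire_cpu(int cpu);	/* hypothetical */

/* CPUs found idle need no IPI: they are already quiescent for this
 * grace period. The rest keep their snapshot for a later
 * in_eqs_since()-style recheck. */
static unsigned long long select_idle_cpus(void)
{
	unsigned long long mask_ofl_test = 0;

	for (int cpu = 0; cpu < SKETCH_NR_CPUS; cpu++) {
		unsigned int snap = snapshot_acquire_cpu(cpu);

		if (in_eqs(snap))
			mask_ofl_test |= 1ULL << cpu;
		else
			per_cpu_snap[cpu] = snap;
	}
	return mask_ofl_test;
}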