
[09/10] x86/rcu: Add rcu_preempt_count

Message ID 20240328075318.83039-10-jiangshanlai@gmail.com (mailing list archive)
State Superseded
Series rcu/x86: Use per-cpu rcu preempt count

Commit Message

Lai Jiangshan March 28, 2024, 7:53 a.m. UTC
From: Lai Jiangshan <jiangshan.ljs@antgroup.com>

Implement PCPU_RCU_PREEMPT_COUNT for x86.
Mainly copied from asm/preempt.h

Make rcu_read_[un]lock() inlined for rcu-preempt.
Make rcu_read_lock() only one instruction.
Make rcu_read_unlock() only two instructions in the fast path.

Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
---
 arch/x86/Kconfig                   |   1 +
 arch/x86/include/asm/current.h     |   3 +
 arch/x86/include/asm/rcu_preempt.h | 107 +++++++++++++++++++++++++++++
 arch/x86/kernel/cpu/common.c       |   7 +-
 4 files changed, 115 insertions(+), 3 deletions(-)
 create mode 100644 arch/x86/include/asm/rcu_preempt.h
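
A minimal user-space sketch of the fast paths described in the commit message (illustrative only, not the kernel code; the constant name mirrors the patch, everything else is a simplification of the counting logic):

/*
 * Model of the per-CPU rcu_preempt_count fast paths. In the kernel the
 * counter lives in pcpu_hot and is accessed with per-CPU instructions,
 * so lock is roughly one "incl" and unlock is "decl" plus a predicted
 * branch; this model only shows the counting behaviour.
 */
#include <stdio.h>

#define RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED	0x80000000u

/* Stands in for this CPU's pcpu_hot.rcu_preempt_count. */
static unsigned int rcu_preempt_count = RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED;

static void model_rcu_read_unlock_special(void)
{
	printf("slow path: handle deferred unlock work\n");
	rcu_preempt_count |= RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED;
}

static void model_rcu_read_lock(void)
{
	rcu_preempt_count++;		/* the single fast-path instruction */
}

static void model_rcu_read_unlock(void)
{
	/* decrement + test: the two fast-path instructions */
	if (--rcu_preempt_count == 0)
		model_rcu_read_unlock_special();
}

int main(void)
{
	model_rcu_read_lock();
	model_rcu_read_unlock();	/* fast path: MSB still set, no call */

	model_rcu_read_lock();
	rcu_preempt_count &= ~RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED; /* "special" now pending */
	model_rcu_read_unlock();	/* counter drains to zero, slow path runs */
	return 0;
}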

Comments

kernel test robot March 29, 2024, 7:37 a.m. UTC | #1
Hi Lai,

kernel test robot noticed the following build errors:

[auto build test ERROR on paulmck-rcu/dev]
[also build test ERROR on tip/locking/core tip/sched/core tip/x86/asm tip/master linus/master v6.9-rc1 next-20240328]
[cannot apply to tip/x86/core tip/auto-latest]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Lai-Jiangshan/lib-Use-rcu_preempt_depth-to-replace-current-rcu_read_lock_nesting/20240328-155513
base:   https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
patch link:    https://lore.kernel.org/r/20240328075318.83039-10-jiangshanlai%40gmail.com
patch subject: [PATCH 09/10] x86/rcu: Add rcu_preempt_count
config: x86_64-randconfig-161-20240328 (https://download.01.org/0day-ci/archive/20240329/202403291422.SOVYexxO-lkp@intel.com/config)
compiler: gcc-10 (Ubuntu 10.5.0-1ubuntu1) 10.5.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240329/202403291422.SOVYexxO-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202403291422.SOVYexxO-lkp@intel.com/

All error/warnings (new ones prefixed by >>):

>> arch/x86/kernel/cpu/common.c:1998:3: error: 'struct pcpu_hot' has no member named 'rcu_preempt_count'; did you mean 'preempt_count'?
    1998 |  .rcu_preempt_count = RCU_PREEMPT_INIT,
         |   ^~~~~~~~~~~~~~~~~
         |   preempt_count
>> arch/x86/kernel/cpu/common.c:1998:23: error: 'RCU_PREEMPT_INIT' undeclared here (not in a function); did you mean 'RCUREF_INIT'?
    1998 |  .rcu_preempt_count = RCU_PREEMPT_INIT,
         |                       ^~~~~~~~~~~~~~~~
         |                       RCUREF_INIT
>> arch/x86/kernel/cpu/common.c:1998:23: warning: excess elements in struct initializer
   arch/x86/kernel/cpu/common.c:1998:23: note: (near initialization for 'pcpu_hot')


vim +1998 arch/x86/kernel/cpu/common.c

  1993	
  1994	DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
  1995		.current_task		= &init_task,
  1996		.preempt_count		= INIT_PREEMPT_COUNT,
  1997		.top_of_stack		= TOP_OF_INIT_STACK,
> 1998		.rcu_preempt_count	= RCU_PREEMPT_INIT,
  1999	};
  2000	EXPORT_PER_CPU_SYMBOL(pcpu_hot);
  2001	EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
  2002
kernel test robot March 29, 2024, 7:48 a.m. UTC | #2
Hi Lai,

kernel test robot noticed the following build errors:

[auto build test ERROR on paulmck-rcu/dev]
[also build test ERROR on tip/locking/core tip/sched/core tip/x86/asm tip/master linus/master v6.9-rc1 next-20240328]
[cannot apply to tip/x86/core tip/auto-latest]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Lai-Jiangshan/lib-Use-rcu_preempt_depth-to-replace-current-rcu_read_lock_nesting/20240328-155513
base:   https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
patch link:    https://lore.kernel.org/r/20240328075318.83039-10-jiangshanlai%40gmail.com
patch subject: [PATCH 09/10] x86/rcu: Add rcu_preempt_count
config: i386-randconfig-141-20240328 (https://download.01.org/0day-ci/archive/20240329/202403291537.fHsEhtlq-lkp@intel.com/config)
compiler: clang version 17.0.6 (https://github.com/llvm/llvm-project 6009708b4367171ccdbf4b5905cb6a803753fe18)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240329/202403291537.fHsEhtlq-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202403291537.fHsEhtlq-lkp@intel.com/

All errors (new ones prefixed by >>):

>> arch/x86/kernel/cpu/common.c:1998:23: error: use of undeclared identifier 'RCU_PREEMPT_INIT'
    1998 |         .rcu_preempt_count      = RCU_PREEMPT_INIT,
         |                                   ^
   1 error generated.

Kconfig warnings: (for reference only)
   WARNING: unmet direct dependencies detected for DRM_I915_DEBUG_GEM
   Depends on [n]: HAS_IOMEM [=y] && DRM_I915 [=y] && EXPERT [=y] && DRM_I915_WERROR [=n]
   Selected by [y]:
   - DRM_I915_DEBUG [=y] && HAS_IOMEM [=y] && DRM_I915 [=y] && EXPERT [=y] && !COMPILE_TEST [=n]


vim +/RCU_PREEMPT_INIT +1998 arch/x86/kernel/cpu/common.c

  1993	
  1994	DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
  1995		.current_task		= &init_task,
  1996		.preempt_count		= INIT_PREEMPT_COUNT,
  1997		.top_of_stack		= TOP_OF_INIT_STACK,
> 1998		.rcu_preempt_count	= RCU_PREEMPT_INIT,
  1999	};
  2000	EXPORT_PER_CPU_SYMBOL(pcpu_hot);
  2001	EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
  2002
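
Both robot reports point at the same issue: when CONFIG_PCPU_RCU_PREEMPT_COUNT is not set, struct pcpu_hot has no rcu_preempt_count member and RCU_PREEMPT_INIT is never defined, yet the pcpu_hot initializer references both unconditionally. One plausible guard is sketched below (a sketch only; the actual fix landed in v2 of the series, linked later in the thread):

/* Sketch, not the v2 fix itself: guard the new initializer so configs
 * without CONFIG_PCPU_RCU_PREEMPT_COUNT reference neither the missing
 * struct member nor the undeclared RCU_PREEMPT_INIT. */
DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
	.current_task		= &init_task,
	.preempt_count		= INIT_PREEMPT_COUNT,
	.top_of_stack		= TOP_OF_INIT_STACK,
#ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
	.rcu_preempt_count	= RCU_PREEMPT_INIT,
#endif
};
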
Frederic Weisbecker April 22, 2024, 11:05 a.m. UTC | #3
On Thu, Mar 28, 2024 at 03:53:17PM +0800, Lai Jiangshan wrote:
> From: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> 
> Implement PCPU_RCU_PREEMPT_COUNT for x86.
> Mainly copied from asm/preempt.h
> 
> Make rcu_read_[un]lock() inlined for rcu-preempt.
> Make rcu_read_lock() only one instruction.
> Make rcu_read_unlock() only two instructions in the fast path.
> 
> Cc: "Paul E. McKenney" <paulmck@kernel.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: Frederic Weisbecker <frederic@kernel.org>
> Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
> ---
>  arch/x86/Kconfig                   |   1 +
>  arch/x86/include/asm/current.h     |   3 +
>  arch/x86/include/asm/rcu_preempt.h | 107 +++++++++++++++++++++++++++++
>  arch/x86/kernel/cpu/common.c       |   7 +-
>  4 files changed, 115 insertions(+), 3 deletions(-)
>  create mode 100644 arch/x86/include/asm/rcu_preempt.h
> 
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index 78050d5d7fac..7eb17c12f7b7 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -257,6 +257,7 @@ config X86
>  	select HAVE_OBJTOOL			if X86_64
>  	select HAVE_OPTPROBES
>  	select HAVE_PAGE_SIZE_4KB
> +	select HAVE_PCPU_RCU_PREEMPT_COUNT
>  	select HAVE_PCSPKR_PLATFORM
>  	select HAVE_PERF_EVENTS
>  	select HAVE_PERF_EVENTS_NMI
> diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
> index bf5953883ec3..dcc2ef784120 100644
> --- a/arch/x86/include/asm/current.h
> +++ b/arch/x86/include/asm/current.h
> @@ -24,6 +24,9 @@ struct pcpu_hot {
>  			unsigned long		top_of_stack;
>  			void			*hardirq_stack_ptr;
>  			u16			softirq_pending;
> +#ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
> +			int			rcu_preempt_count;
> +#endif // #ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
>  #ifdef CONFIG_X86_64
>  			bool			hardirq_stack_inuse;
>  #else
> diff --git a/arch/x86/include/asm/rcu_preempt.h b/arch/x86/include/asm/rcu_preempt.h
> new file mode 100644
> index 000000000000..cb25ebe038a5
> --- /dev/null
> +++ b/arch/x86/include/asm/rcu_preempt.h
> @@ -0,0 +1,107 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __ASM_RCU_PREEMPT_H
> +#define __ASM_RCU_PREEMPT_H
> +
> +#include <asm/rmwcc.h>
> +#include <asm/percpu.h>
> +#include <asm/current.h>
> +
> +#ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
> +
> +/* We use the MSB mostly because its available */

I think you can safely remove the "We " from all the comments :-)

> +#define RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED	0x80000000

How about RCU_PREEMPT_UNLOCK_FASTPATH ?

> +
> +/*
> + * We use the RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED bit as an inverted
> + * current->rcu_read_unlock_special.s such that a decrement hitting 0
> + * means we can and should call rcu_read_unlock_special().
> + */
> +#define RCU_PREEMPT_INIT	(0 + RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED)

Or simply:

#define RCU_PREEMPT_INIT     RCU_PREEMPT_UNLOCK_FASTPATH

Or you can even remove RCU_PREEMPT_INIT and use RCU_PREEMPT_UNLOCK_FASTPATH directly.

> +/*
> + * Because we keep RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED set when we do
> + * _not_ need to handle unlock-special for a fast-path decrement.
> + */
> +static __always_inline bool pcpu_rcu_preempt_count_dec_and_test(void)
> +{
> +	return GEN_UNARY_RMWcc("decl", __my_cpu_var(pcpu_hot.rcu_preempt_count), e,
> +			       __percpu_arg([var]));
> +}
> +
> +#define pcpu_rcu_read_unlock_special()						\
> +do {										\
> +	rcu_read_unlock_special();						\
> +} while (0)

Why not just call that directly?

> +
> +#endif // #ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
> +
> +#endif /* __ASM_RCU_PREEMPT_H */
> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
> index ba8cf5e9ce56..0b204a649442 100644
> --- a/arch/x86/kernel/cpu/common.c
> +++ b/arch/x86/kernel/cpu/common.c
> @@ -1992,9 +1992,10 @@ static __init int setup_clearcpuid(char *arg)
>  __setup("clearcpuid=", setup_clearcpuid);
>  
>  DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
> -	.current_task	= &init_task,
> -	.preempt_count	= INIT_PREEMPT_COUNT,
> -	.top_of_stack	= TOP_OF_INIT_STACK,
> +	.current_task		= &init_task,
> +	.preempt_count		= INIT_PREEMPT_COUNT,
> +	.top_of_stack		= TOP_OF_INIT_STACK,
> +	.rcu_preempt_count	= RCU_PREEMPT_INIT,

#ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT ?

Thanks.


>  };
>  EXPORT_PER_CPU_SYMBOL(pcpu_hot);
>  EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
> -- 
> 2.19.1.6.gb485710b
>
Lai Jiangshan April 23, 2024, 9:02 a.m. UTC | #4
Hello, Frederic

Thanks for reviewing.

On Mon, Apr 22, 2024 at 7:05 PM Frederic Weisbecker <frederic@kernel.org> wrote:

> > +
> > +/* We use the MSB mostly because its available */
>
> I think you can safely remove the "We " from all the comments :-)

The file is mainly copied from arch/x86/include/asm/preempt.h.
I will rephrase sentences in later iterations.

>
> > +#define RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED  0x80000000
>
> How about RCU_PREEMPT_UNLOCK_FASTPATH ?


I'm not good at naming. But the MSB really means exactly the opposite
of current->rcu_read_unlock_special and I think "UNLOCK_SPECIAL_INVERTED"
fits the meaning.

>
> > +
> > +/*
> > + * We use the RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED bit as an inverted
> > + * current->rcu_read_unlock_special.s such that a decrement hitting 0
> > + * means we can and should call rcu_read_unlock_special().
> > + */
> > +#define RCU_PREEMPT_INIT     (0 + RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED)
>
> Or simply:
>
> #define RCU_PREEMPT_INIT     RCU_PREEMPT_UNLOCK_FASTPATH
>
> Or you can even remove RCU_PREEMPT_INIT and use RCU_PREEMPT_UNLOCK_FASTPATH directly.

"0" means the initial rcu_preempt_count is 0 for the initial task.

> > +
> > +#endif // #ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
> > +
> > +#endif /* __ASM_RCU_PREEMPT_H */
> > diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
> > index ba8cf5e9ce56..0b204a649442 100644
> > --- a/arch/x86/kernel/cpu/common.c
> > +++ b/arch/x86/kernel/cpu/common.c
> > @@ -1992,9 +1992,10 @@ static __init int setup_clearcpuid(char *arg)
> >  __setup("clearcpuid=", setup_clearcpuid);
> >
> >  DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
> > -     .current_task   = &init_task,
> > -     .preempt_count  = INIT_PREEMPT_COUNT,
> > -     .top_of_stack   = TOP_OF_INIT_STACK,
> > +     .current_task           = &init_task,
> > +     .preempt_count          = INIT_PREEMPT_COUNT,
> > +     .top_of_stack           = TOP_OF_INIT_STACK,
> > +     .rcu_preempt_count      = RCU_PREEMPT_INIT,
>
> #ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT ?
>
> Thanks.

Fixed in V2:
https://lore.kernel.org/lkml/20240407090558.3395-1-jiangshanlai@gmail.com/

Thanks
Lai

>
>
> >  };
> >  EXPORT_PER_CPU_SYMBOL(pcpu_hot);
> >  EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
> > --
> > 2.19.1.6.gb485710b
> >
Frederic Weisbecker April 23, 2024, 11:33 a.m. UTC | #5
On Tue, Apr 23, 2024 at 05:02:35PM +0800, Lai Jiangshan wrote:
> Hello, Frederic
> 
> Thanks for reviewing.
> 
> On Mon, Apr 22, 2024 at 7:05 PM Frederic Weisbecker <frederic@kernel.org> wrote:
> 
> > > +
> > > +/* We use the MSB mostly because its available */
> >
> > I think you can safely remove the "We " from all the comments :-)
> 
> The file is mainly copied from arch/x86/include/asm/preempt.h.
> I will rephrase sentences in later iterations.
> 
> >
> > > +#define RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED  0x80000000
> >
> > How about RCU_PREEMPT_UNLOCK_FASTPATH ?
> 
> 
> I'm not good at naming. But the MSB really means exactly the opposite
> of current->rcu_read_unlock_special and I think "UNLOCK_SPECIAL_INVERTED"
> fits the meaning.

Right but I tend to think a constant should tell what something is, not what
something is not.

FWIW, p->rcu_read_unlock_special could even be renamed to p->rcu_read_unlock_slowpath

Thanks.

Patch

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 78050d5d7fac..7eb17c12f7b7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -257,6 +257,7 @@  config X86
 	select HAVE_OBJTOOL			if X86_64
 	select HAVE_OPTPROBES
 	select HAVE_PAGE_SIZE_4KB
+	select HAVE_PCPU_RCU_PREEMPT_COUNT
 	select HAVE_PCSPKR_PLATFORM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_EVENTS_NMI
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index bf5953883ec3..dcc2ef784120 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -24,6 +24,9 @@  struct pcpu_hot {
 			unsigned long		top_of_stack;
 			void			*hardirq_stack_ptr;
 			u16			softirq_pending;
+#ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
+			int			rcu_preempt_count;
+#endif // #ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
 #ifdef CONFIG_X86_64
 			bool			hardirq_stack_inuse;
 #else
diff --git a/arch/x86/include/asm/rcu_preempt.h b/arch/x86/include/asm/rcu_preempt.h
new file mode 100644
index 000000000000..cb25ebe038a5
--- /dev/null
+++ b/arch/x86/include/asm/rcu_preempt.h
@@ -0,0 +1,107 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_RCU_PREEMPT_H
+#define __ASM_RCU_PREEMPT_H
+
+#include <asm/rmwcc.h>
+#include <asm/percpu.h>
+#include <asm/current.h>
+
+#ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
+
+/* We use the MSB mostly because its available */
+#define RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED	0x80000000
+
+/*
+ * We use the RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED bit as an inverted
+ * current->rcu_read_unlock_special.s such that a decrement hitting 0
+ * means we can and should call rcu_read_unlock_special().
+ */
+#define RCU_PREEMPT_INIT	(0 + RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED)
+
+/*
+ * We mask the RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED bit so as not to
+ * confuse all current users that think a non-zero value indicates we
+ * are in a critical section.
+ */
+static inline int pcpu_rcu_preempt_count(void)
+{
+	return raw_cpu_read_4(pcpu_hot.rcu_preempt_count) & ~RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED;
+}
+
+static inline void pcpu_rcu_preempt_count_set(int count)
+{
+	int old, new;
+
+	old = raw_cpu_read_4(pcpu_hot.rcu_preempt_count);
+	do {
+		new = (old & RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED) |
+			(count & ~RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED);
+	} while (!raw_cpu_try_cmpxchg_4(pcpu_hot.rcu_preempt_count, &old, new));
+}
+
+/*
+ * We fold the RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED bit into the RCU
+ * preempt count such that rcu_read_unlock() can decrement and test for
+ * the need of unlock-special handling with a single instruction.
+ *
+ * We invert the actual bit, so that when the decrement hits 0 we know
+ * we both reach a quiescent state (no rcu preempt count) and need to
+ * handle unlock-special (the bit is cleared), normally to report the
+ * quiescent state immediately.
+ */
+
+static inline void pcpu_rcu_preempt_special_set(void)
+{
+	raw_cpu_and_4(pcpu_hot.rcu_preempt_count, ~RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED);
+}
+
+static inline void pcpu_rcu_preempt_special_clear(void)
+{
+	raw_cpu_or_4(pcpu_hot.rcu_preempt_count, RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED);
+}
+
+static inline bool pcpu_rcu_preempt_special_test(void)
+{
+	return !(raw_cpu_read_4(pcpu_hot.rcu_preempt_count) & RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED);
+}
+
+static inline void pcpu_rcu_preempt_switch(int count, bool special)
+{
+	if (likely(!special))
+		raw_cpu_write(pcpu_hot.rcu_preempt_count, count | RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED);
+	else
+		raw_cpu_write(pcpu_hot.rcu_preempt_count, count);
+}
+
+/*
+ * The various rcu_preempt_count add/sub methods
+ */
+
+static __always_inline void pcpu_rcu_preempt_count_add(int val)
+{
+	raw_cpu_add_4(pcpu_hot.rcu_preempt_count, val);
+}
+
+static __always_inline void pcpu_rcu_preempt_count_sub(int val)
+{
+	raw_cpu_add_4(pcpu_hot.rcu_preempt_count, -val);
+}
+
+/*
+ * Because we keep RCU_PREEMPT_UNLOCK_SPECIAL_INVERTED set when we do
+ * _not_ need to handle unlock-special for a fast-path decrement.
+ */
+static __always_inline bool pcpu_rcu_preempt_count_dec_and_test(void)
+{
+	return GEN_UNARY_RMWcc("decl", __my_cpu_var(pcpu_hot.rcu_preempt_count), e,
+			       __percpu_arg([var]));
+}
+
+#define pcpu_rcu_read_unlock_special()						\
+do {										\
+	rcu_read_unlock_special();						\
+} while (0)
+
+#endif // #ifdef CONFIG_PCPU_RCU_PREEMPT_COUNT
+
+#endif /* __ASM_RCU_PREEMPT_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index ba8cf5e9ce56..0b204a649442 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1992,9 +1992,10 @@  static __init int setup_clearcpuid(char *arg)
 __setup("clearcpuid=", setup_clearcpuid);
 
 DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
-	.current_task	= &init_task,
-	.preempt_count	= INIT_PREEMPT_COUNT,
-	.top_of_stack	= TOP_OF_INIT_STACK,
+	.current_task		= &init_task,
+	.preempt_count		= INIT_PREEMPT_COUNT,
+	.top_of_stack		= TOP_OF_INIT_STACK,
+	.rcu_preempt_count	= RCU_PREEMPT_INIT,
 };
 EXPORT_PER_CPU_SYMBOL(pcpu_hot);
 EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
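
One helper worth a second look above is pcpu_rcu_preempt_count_set(): it must replace the nesting count while leaving the inverted special bit untouched. Below is a stand-alone C11 model of that loop (illustrative; the kernel version operates on the local CPU's variable via raw_cpu_try_cmpxchg_4, so it does not need cross-CPU atomicity, and the names here simply mirror the patch):

/* Stand-alone model of the count-set cmpxchg loop from the patch above.
 * "MSB clear" means unlock-special handling is pending, matching the
 * inverted-bit convention; everything else is simplified. */
#include <stdatomic.h>
#include <stdio.h>

#define SPECIAL_INVERTED	0x80000000u

static _Atomic unsigned int rcu_preempt_count = SPECIAL_INVERTED;

/* Replace the nesting count while preserving the inverted special bit. */
static void model_count_set(unsigned int count)
{
	unsigned int old = atomic_load(&rcu_preempt_count);
	unsigned int new;

	do {
		new = (old & SPECIAL_INVERTED) | (count & ~SPECIAL_INVERTED);
	} while (!atomic_compare_exchange_weak(&rcu_preempt_count, &old, new));
}

int main(void)
{
	model_count_set(3);		/* nesting depth 3, bit preserved */
	printf("count=%#x\n", atomic_load(&rcu_preempt_count));	/* 0x80000003 */

	atomic_fetch_and(&rcu_preempt_count, ~SPECIAL_INVERTED);	/* special pending */
	model_count_set(1);
	printf("count=%#x\n", atomic_load(&rcu_preempt_count));	/* 0x1 */
	return 0;
}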