
[v2,1/2] xen/link: Introduce .bss.percpu.page_aligned

Message ID 20190726203222.4833-2-andrew.cooper3@citrix.com (mailing list archive)
State Superseded
Series x86/xpti: Don't leak TSS-adjacent percpu data via Meltdown

Commit Message

Andrew Cooper July 26, 2019, 8:32 p.m. UTC
Future changes are going to need to page align some percpu data.

This means that the percpu area needs suitably aligning in the BSS so CPU0 has
correctly aligned data.  Shuffle the exact link order of items within the BSS
to give .bss.percpu.page_aligned appropriate alignment.

In addition, we need to be able to specify an alignment attribute to
__DEFINE_PER_CPU().  Rework it so the caller passes in all attributes, and
adjust DEFINE_PER_CPU{,_READ_MOSTLY}() to match.  This has the added bonus
that it is now possible to grep for .bss.percpu and find all the users.

Finally, introduce DEFINE_PER_CPU_PAGE_ALIGNED() which uses both section and
alignment attributes.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wl@xen.org>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Julien Grall <julien.grall@arm.com>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>

v2:
 * Rework __DEFINE_PER_CPU() to allow for further attributes to be passed.
 * Specify __aligned(PAGE_SIZE) as part of DEFINE_PER_CPU_PAGE_ALIGNED().
---
 xen/arch/arm/xen.lds.S       |  5 +++--
 xen/arch/x86/xen.lds.S       |  5 +++--
 xen/include/asm-arm/percpu.h |  6 ++----
 xen/include/asm-x86/percpu.h |  6 ++----
 xen/include/xen/percpu.h     | 10 ++++++++--
 5 files changed, 18 insertions(+), 14 deletions(-)
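
As an illustrative sketch (not part of the patch; 'foo' is a hypothetical
variable name), the reworked macros expand as follows:

    /* Sketch only: 'foo' is hypothetical. */
    DEFINE_PER_CPU_PAGE_ALIGNED(unsigned long, foo);

    /* ... expands, via __DEFINE_PER_CPU(), to roughly: */
    __section(".bss.percpu.page_aligned") __aligned(PAGE_SIZE)
    __typeof__(unsigned long) per_cpu__foo;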

Comments

Julien Grall July 26, 2019, 9:39 p.m. UTC | #1
Hi Andrew,

On 7/26/19 9:32 PM, Andrew Cooper wrote:
> Future changes are going to need to page align some percpu data.
> 
> This means that the percpu area needs suitably aligning in the BSS so CPU0 has
> correctly aligned data.  Shuffle the exact link order of items within the BSS
> to give .bss.percpu.page_aligned appropriate alignment.
> 
> In addition, we need to be able to specify an alignment attribute to
> __DEFINE_PER_CPU().  Rework it so the caller passes in all attributes, and
> adjust DEFINE_PER_CPU{,_READ_MOSTLY}() to match.  This has the added bonus
> that it is now possible to grep for .bss.percpu and find all the users.
> 
> Finally, introduce DEFINE_PER_CPU_PAGE_ALIGNED() which uses both section and
> alignment attributes.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: Julien Grall <julien.grall@arm.com>

Cheers,

> ---
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Wei Liu <wl@xen.org>
> CC: Roger Pau Monné <roger.pau@citrix.com>
> CC: Stefano Stabellini <sstabellini@kernel.org>
> CC: Julien Grall <julien.grall@arm.com>
> CC: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
> 
> v2:
>   * Rework __DEFINE_PER_CPU() to allow for further attributes to be passed.
>   * Specify __aligned(PAGE_SIZE) as part of DEFINE_PER_CPU_PAGE_ALIGNED().
> ---
>   xen/arch/arm/xen.lds.S       |  5 +++--
>   xen/arch/x86/xen.lds.S       |  5 +++--
>   xen/include/asm-arm/percpu.h |  6 ++----
>   xen/include/asm-x86/percpu.h |  6 ++----
>   xen/include/xen/percpu.h     | 10 ++++++++--
>   5 files changed, 18 insertions(+), 14 deletions(-)
> 
> diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
> index 12c107f45d..07cbdf2543 100644
> --- a/xen/arch/arm/xen.lds.S
> +++ b/xen/arch/arm/xen.lds.S
> @@ -201,14 +201,15 @@ SECTIONS
>          *(.bss.stack_aligned)
>          . = ALIGN(PAGE_SIZE);
>          *(.bss.page_aligned)
> -       *(.bss)
> -       . = ALIGN(SMP_CACHE_BYTES);
>          __per_cpu_start = .;
> +       *(.bss.percpu.page_aligned)
>          *(.bss.percpu)
>          . = ALIGN(SMP_CACHE_BYTES);
>          *(.bss.percpu.read_mostly)
>          . = ALIGN(SMP_CACHE_BYTES);
>          __per_cpu_data_end = .;
> +       *(.bss)
> +       . = ALIGN(SMP_CACHE_BYTES);
>          __bss_end = .;
>     } :text
>     _end = . ;
> diff --git a/xen/arch/x86/xen.lds.S b/xen/arch/x86/xen.lds.S
> index a73139cd29..b8a2ea4259 100644
> --- a/xen/arch/x86/xen.lds.S
> +++ b/xen/arch/x86/xen.lds.S
> @@ -293,14 +293,15 @@ SECTIONS
>          __bss_start = .;
>          *(.bss.stack_aligned)
>          *(.bss.page_aligned*)
> -       *(.bss)
> -       . = ALIGN(SMP_CACHE_BYTES);
>          __per_cpu_start = .;
> +       *(.bss.percpu.page_aligned)
>          *(.bss.percpu)
>          . = ALIGN(SMP_CACHE_BYTES);
>          *(.bss.percpu.read_mostly)
>          . = ALIGN(SMP_CACHE_BYTES);
>          __per_cpu_data_end = .;
> +       *(.bss)
> +       . = ALIGN(SMP_CACHE_BYTES);
>          __bss_end = .;
>     } :text
>     _end = . ;
> diff --git a/xen/include/asm-arm/percpu.h b/xen/include/asm-arm/percpu.h
> index 9584b830d4..264120b192 100644
> --- a/xen/include/asm-arm/percpu.h
> +++ b/xen/include/asm-arm/percpu.h
> @@ -10,10 +10,8 @@ extern char __per_cpu_start[], __per_cpu_data_end[];
>   extern unsigned long __per_cpu_offset[NR_CPUS];
>   void percpu_init_areas(void);
>   
> -/* Separate out the type, so (int[3], foo) works. */
> -#define __DEFINE_PER_CPU(type, name, suffix)                    \
> -    __section(".bss.percpu" #suffix)                            \
> -    __typeof__(type) per_cpu_##name
> +#define __DEFINE_PER_CPU(attr, type, name) \
> +    attr __typeof__(type) per_cpu_ ## name
>   
>   #define per_cpu(var, cpu)  \
>       (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
> diff --git a/xen/include/asm-x86/percpu.h b/xen/include/asm-x86/percpu.h
> index ff34dc7897..5b6cef04c4 100644
> --- a/xen/include/asm-x86/percpu.h
> +++ b/xen/include/asm-x86/percpu.h
> @@ -7,10 +7,8 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
>   void percpu_init_areas(void);
>   #endif
>   
> -/* Separate out the type, so (int[3], foo) works. */
> -#define __DEFINE_PER_CPU(type, name, suffix)                    \
> -    __section(".bss.percpu" #suffix)                            \
> -    __typeof__(type) per_cpu_##name
> +#define __DEFINE_PER_CPU(attr, type, name) \
> +    attr __typeof__(type) per_cpu_ ## name
>   
>   /* var is in discarded region: offset to particular copy we want */
>   #define per_cpu(var, cpu)  \
> diff --git a/xen/include/xen/percpu.h b/xen/include/xen/percpu.h
> index aeec5c19d6..71a31cc361 100644
> --- a/xen/include/xen/percpu.h
> +++ b/xen/include/xen/percpu.h
> @@ -9,9 +9,15 @@
>    * The _##name concatenation is being used here to prevent 'name' from getting
>    * macro expanded, while still allowing a per-architecture symbol name prefix.
>    */
> -#define DEFINE_PER_CPU(type, name) __DEFINE_PER_CPU(type, _##name, )
> +#define DEFINE_PER_CPU(type, name) \
> +    __DEFINE_PER_CPU(__section(".bss.percpu"), type, _ ## name)
> +
> +#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
> +    __DEFINE_PER_CPU(__section(".bss.percpu.page_aligned") \
> +                     __aligned(PAGE_SIZE), type, _ ## name)
> +
>   #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
> -	__DEFINE_PER_CPU(type, _##name, .read_mostly)
> +    __DEFINE_PER_CPU(__section(".bss.percpu.read_mostly"), type, _ ## name)
>   
>   #define get_per_cpu_var(var)  (per_cpu__##var)
>   
>
Roger Pau Monné July 29, 2019, 8:52 a.m. UTC | #2
On Fri, Jul 26, 2019 at 09:32:21PM +0100, Andrew Cooper wrote:
> Future changes are going to need to page align some percpu data.
> 
> This means that the percpu area needs suitably aligning in the BSS so CPU0 has
> correctly aligned data.  Shuffle the exact link order of items within the BSS
> to give .bss.percpu.page_aligned appropriate alignment.
> 
> In addition, we need to be able to specify an alignment attribute to
> __DEFINE_PER_CPU().  Rework it so the caller passes in all attributes, and
> adjust DEFINE_PER_CPU{,_READ_MOSTLY}() to match.  This has the added bonus
> that it is now possible to grep for .bss.percpu and find all the users.
> 
> Finally, introduce DEFINE_PER_CPU_PAGE_ALIGNED() which uses both section and
> alignment attributes.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

> diff --git a/xen/include/asm-x86/percpu.h b/xen/include/asm-x86/percpu.h
> index ff34dc7897..5b6cef04c4 100644
> --- a/xen/include/asm-x86/percpu.h
> +++ b/xen/include/asm-x86/percpu.h
> @@ -7,10 +7,8 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
>  void percpu_init_areas(void);
>  #endif
>  
> -/* Separate out the type, so (int[3], foo) works. */
> -#define __DEFINE_PER_CPU(type, name, suffix)                    \
> -    __section(".bss.percpu" #suffix)                            \
> -    __typeof__(type) per_cpu_##name
> +#define __DEFINE_PER_CPU(attr, type, name) \
> +    attr __typeof__(type) per_cpu_ ## name
>  
>  /* var is in discarded region: offset to particular copy we want */
>  #define per_cpu(var, cpu)  \
> diff --git a/xen/include/xen/percpu.h b/xen/include/xen/percpu.h
> index aeec5c19d6..71a31cc361 100644
> --- a/xen/include/xen/percpu.h
> +++ b/xen/include/xen/percpu.h
> @@ -9,9 +9,15 @@
>   * The _##name concatenation is being used here to prevent 'name' from getting
>   * macro expanded, while still allowing a per-architecture symbol name prefix.
>   */
> -#define DEFINE_PER_CPU(type, name) __DEFINE_PER_CPU(type, _##name, )
> +#define DEFINE_PER_CPU(type, name) \
> +    __DEFINE_PER_CPU(__section(".bss.percpu"), type, _ ## name)
> +
> +#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
> +    __DEFINE_PER_CPU(__section(".bss.percpu.page_aligned") \
> +                     __aligned(PAGE_SIZE), type, _ ## name)
> +
>  #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
> -	__DEFINE_PER_CPU(type, _##name, .read_mostly)
> +    __DEFINE_PER_CPU(__section(".bss.percpu.read_mostly"), type, _ ## name)

AFAICT also adding a '_' here will result in variable names like
per_cpu__foo, which is in line with the previous behaviour, but I'm not
sure of the point of the double underscore.

Thanks, Roger.
Andrew Cooper July 29, 2019, 9:01 a.m. UTC | #3
On 29/07/2019 09:52, Roger Pau Monné wrote:
> On Fri, Jul 26, 2019 at 09:32:21PM +0100, Andrew Cooper wrote:
>> Future changes are going to need to page align some percpu data.
>>
>> This means that the percpu area needs suitably aligning in the BSS so CPU0 has
>> correctly aligned data.  Shuffle the exact link order of items within the BSS
>> to give .bss.percpu.page_aligned appropriate alignment.
>>
>> In addition, we need to be able to specify an alignment attribute to
>> __DEFINE_PER_CPU().  Rework it so the caller passes in all attributes, and
>> adjust DEFINE_PER_CPU{,_READ_MOSTLY}() to match.  This has the added bonus
>> that it is now possible to grep for .bss.percpu and find all the users.
>>
>> Finally, introduce DEFINE_PER_CPU_PAGE_ALIGNED() which uses both section and
>> alignment attributes.
>>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
>
>> diff --git a/xen/include/asm-x86/percpu.h b/xen/include/asm-x86/percpu.h
>> index ff34dc7897..5b6cef04c4 100644
>> --- a/xen/include/asm-x86/percpu.h
>> +++ b/xen/include/asm-x86/percpu.h
>> @@ -7,10 +7,8 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
>>  void percpu_init_areas(void);
>>  #endif
>>  
>> -/* Separate out the type, so (int[3], foo) works. */
>> -#define __DEFINE_PER_CPU(type, name, suffix)                    \
>> -    __section(".bss.percpu" #suffix)                            \
>> -    __typeof__(type) per_cpu_##name
>> +#define __DEFINE_PER_CPU(attr, type, name) \
>> +    attr __typeof__(type) per_cpu_ ## name
>>  
>>  /* var is in discarded region: offset to particular copy we want */
>>  #define per_cpu(var, cpu)  \
>> diff --git a/xen/include/xen/percpu.h b/xen/include/xen/percpu.h
>> index aeec5c19d6..71a31cc361 100644
>> --- a/xen/include/xen/percpu.h
>> +++ b/xen/include/xen/percpu.h
>> @@ -9,9 +9,15 @@
>>   * The _##name concatenation is being used here to prevent 'name' from getting
>>   * macro expanded, while still allowing a per-architecture symbol name prefix.
>>   */
>> -#define DEFINE_PER_CPU(type, name) __DEFINE_PER_CPU(type, _##name, )
>> +#define DEFINE_PER_CPU(type, name) \
>> +    __DEFINE_PER_CPU(__section(".bss.percpu"), type, _ ## name)
>> +
>> +#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
>> +    __DEFINE_PER_CPU(__section(".bss.percpu.page_aligned") \
>> +                     __aligned(PAGE_SIZE), type, _ ## name)
>> +
>>  #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
>> -	__DEFINE_PER_CPU(type, _##name, .read_mostly)
>> +    __DEFINE_PER_CPU(__section(".bss.percpu.read_mostly"), type, _ ## name)
> AFAICT also adding a '_' here will result in variable names like
> per_cpu__foo, which is in line with the previous behaviour, but I'm not
> sure of the point of the double underscore.

It's doubled to avoid token expansion.  See the comment in context at the
top of this hunk.

Without it, the schedulers don't compile because cpumask_scratch is both
the name of a percpu variable, and an alias to
&this_cpu(cpumask_scratch).  Omitting the token concatenation here
causes &this_cpu() to be expanded into __DEFINE_PER_CPU().
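
As a minimal sketch of that failure mode (modelled on the scheduler code;
abbreviated):

    /* The percpu variable and its this_cpu() alias share a name. */
    DEFINE_PER_CPU(cpumask_t, cpumask_scratch);
    #define cpumask_scratch (&this_cpu(cpumask_scratch))

    /*
     * Because DEFINE_PER_CPU() pastes _ ## name before passing it on, the
     * argument is never macro-expanded, and the definition above becomes:
     *
     *   __section(".bss.percpu") __typeof__(cpumask_t) per_cpu__cpumask_scratch;
     *
     * Passing 'name' through unpasted would instead let the argument expand
     * via the #define above to (&this_cpu(cpumask_scratch)) first, and the
     * paste inside __DEFINE_PER_CPU() would then fail to compile.
     */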

~Andrew

P.S. Guess who tried to remove this piece of "fun" to begin with...
Jan Beulich July 29, 2019, 1:17 p.m. UTC | #4
On 26.07.2019 22:32, Andrew Cooper wrote:
> Future changes are going to need to page align some percpu data.
> 
> This means that the percpu area needs suitably aligning in the BSS so CPU0 has
> correctly aligned data.  Shuffle the exact link order of items within the BSS
> to give .bss.percpu.page_aligned appropriate alignment.
> 
> In addition, we need to be able to specify an alignment attribute to
> __DEFINE_PER_CPU().  Rework it so the caller passes in all attributes, and
> adjust DEFINE_PER_CPU{,_READ_MOSTLY}() to match.  This has the added bonus
> that it is now possible to grep for .bss.percpu and find all the users.

And it has the meaningful downside that every use site now needs to get
things right. This is not really a problem, but solely because
__DEFINE_PER_CPU() is a helper for all the real DEFINE_PER_CPU*(). The
grep-ing argument is not really a meaningful one imo anyway - you could
just as well grep for DEFINE_PER_CPU.

Anyway - this is not an objection to the chosen approach, just a remark.
I'd like to note though that you explicitly undo something I had done
(iirc), and I may find it odd when running into it again down the road,
potentially resulting in an "undo-the-undo" patch. I think we really
need to find a way to avoid re-doing things that were done intentionally
in certain ways, when the difference between variants is merely personal
taste.

> --- a/xen/arch/x86/xen.lds.S
> +++ b/xen/arch/x86/xen.lds.S
> @@ -293,14 +293,15 @@ SECTIONS
>          __bss_start = .;
>          *(.bss.stack_aligned)
>          *(.bss.page_aligned*)
> -       *(.bss)
> -       . = ALIGN(SMP_CACHE_BYTES);
>          __per_cpu_start = .;
> +       *(.bss.percpu.page_aligned)

Now this is a case where I think an explicit ALIGN(PAGE_SIZE) would be
desirable: If the last item in .bss.page_aligned was not a multiple of
PAGE_SIZE in size, then __per_cpu_start would live needlessly early,
possibly increasing our memory overhead by a page per CPU for no gain
at all.
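
As a sketch of the arithmetic (addresses hypothetical):

    /* Gap between __per_cpu_start and the first page-aligned percpu object. */
    #include <stdio.h>
    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long per_cpu_start = 0x2100; /* end of .bss.page_aligned */
        unsigned long first_obj = (per_cpu_start + PAGE_SIZE - 1) &
                                  ~(PAGE_SIZE - 1);

        /* The gap lies inside the percpu area, so it is paid once per CPU. */
        printf("wasted per CPU: %#lx bytes\n", first_obj - per_cpu_start);

        return 0;
    }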

>          *(.bss.percpu)
>          . = ALIGN(SMP_CACHE_BYTES);
>          *(.bss.percpu.read_mostly)
>          . = ALIGN(SMP_CACHE_BYTES);
>          __per_cpu_data_end = .;
> +       *(.bss)
> +       . = ALIGN(SMP_CACHE_BYTES);
>          __bss_end = .;

Why is this last ALIGN() needed?

Jan
Andrew Cooper July 29, 2019, 3:01 p.m. UTC | #5
On 29/07/2019 14:17, Jan Beulich wrote:
> On 26.07.2019 22:32, Andrew Cooper wrote:
>> Future changes are going to need to page align some percpu data.
>>
>> This means that the percpu area needs suitably aligning in the BSS so CPU0 has
>> correctly aligned data.  Shuffle the exact link order of items within the BSS
>> to give .bss.percpu.page_aligned appropriate alignment.
>>
>> In addition, we need to be able to specify an alignment attribute to
>> __DEFINE_PER_CPU().  Rework it so the caller passes in all attributes, and
>> adjust DEFINE_PER_CPU{,_READ_MOSTLY}() to match.  This has the added bonus
>> that it is now possible to grep for .bss.percpu and find all the users.
> And it has the meaningful downside of now every use site needing to get
> things right.

You say this as if the current way of doing things is anything more
than an illusion of protection.

>  This is not really a problem solely because
> __DEFINE_PER_CPU() is a helper for all the real DEFINE_PER_CPU*(). The
> grep-ing argument is not a really meaningful one imo anyway - you could
> as well grep for DEFINE_PER_CPU.

And as usual, our points of view differ substantially here.  Looking for
DEFINE_PER_CPU() doesn't help you identify the sections in use.  That
requires a further level of indirection.

> Anyway - this is not an objection to the chosen approach, just a remark.
> I'd like to note though that you explicitly undo something I had done
> (iirc), and I may find odd when running into again down the road,
> potentially resulting in an "undo-the-undo" patch. I think we really
> need to find a way to avoid re-doing things that were done intentionally
> in certain ways, when the difference between variants is merely personal
> taste.

Keir introduced percpu in ea608cc36d when DEFINE_PER_CPU() was private
to x86 and had the __section() implicit in it.

You changed DEFINE_PER_CPU() to include a suffix for the purpose of
introducing DEFINE_PER_CPU_READ_MOSTLY() in cfbf17ffbb0, but nowhere is
there any hint of a suggestion that the end result was anything more
than just "how it happened to turn out".


As to "this being intentional to remove mistakes".  There are plenty of
ways to screw this up, including ways which don't involve using
__DEFINE_PER_CPU(,, "foo"), or manually inserting something into
.bss.per_cpu outside of any of the percpu infrastructure, and no amount
of technical measures can catch this.

The only recourse is sensible code review, and any opencoded use of
__DEFINE_PER_CPU() or __section(".bss.percpu" ...) sticks out in an
obvious manner.

>> --- a/xen/arch/x86/xen.lds.S
>> +++ b/xen/arch/x86/xen.lds.S
>> @@ -293,14 +293,15 @@ SECTIONS
>>          __bss_start = .;
>>          *(.bss.stack_aligned)
>>          *(.bss.page_aligned*)
>> -       *(.bss)
>> -       . = ALIGN(SMP_CACHE_BYTES);
>>          __per_cpu_start = .;
>> +       *(.bss.percpu.page_aligned)
> Now this is a case where I think an explicit ALIGN(PAGE_SIZE) would be
> desirable: If the last item in .bss.page_aligned was not a multiple of
> PAGE_SIZE in size, then __per_cpu_start would live needlessly early,
> possibly increasing our memory overhead by a page per CPU for no gain
> at all.

Hmm, yes.  We should do our best to defend against bugs like this.

>
>>          *(.bss.percpu)
>>          . = ALIGN(SMP_CACHE_BYTES);
>>          *(.bss.percpu.read_mostly)
>>          . = ALIGN(SMP_CACHE_BYTES);
>>          __per_cpu_data_end = .;
>> +       *(.bss)
>> +       . = ALIGN(SMP_CACHE_BYTES);
>>          __bss_end = .;
> Why is this last ALIGN() needed?

Try taking it out and the linker will make its feelings known.

Technically, it only needs 8-byte alignment (it's so we can use rep stosq
to clear), which is more relaxed than SMP_CACHE_BYTES.
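
As a sketch of the constraint (Xen's real clear is rep stosq in assembly;
this C loop is an illustrative stand-in):

    #include <stdint.h>

    extern char __bss_start[], __bss_end[];

    /* Requires __bss_end - __bss_start to be a multiple of 8, hence the
     * trailing ALIGN() in the linker script. */
    static void clear_bss(void)
    {
        uint64_t *p = (uint64_t *)__bss_start;

        while ( p < (uint64_t *)__bss_end )
            *p++ = 0;
    }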

~Andrew

Patch

diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
index 12c107f45d..07cbdf2543 100644
--- a/xen/arch/arm/xen.lds.S
+++ b/xen/arch/arm/xen.lds.S
@@ -201,14 +201,15 @@  SECTIONS
        *(.bss.stack_aligned)
        . = ALIGN(PAGE_SIZE);
        *(.bss.page_aligned)
-       *(.bss)
-       . = ALIGN(SMP_CACHE_BYTES);
        __per_cpu_start = .;
+       *(.bss.percpu.page_aligned)
        *(.bss.percpu)
        . = ALIGN(SMP_CACHE_BYTES);
        *(.bss.percpu.read_mostly)
        . = ALIGN(SMP_CACHE_BYTES);
        __per_cpu_data_end = .;
+       *(.bss)
+       . = ALIGN(SMP_CACHE_BYTES);
        __bss_end = .;
   } :text
   _end = . ;
diff --git a/xen/arch/x86/xen.lds.S b/xen/arch/x86/xen.lds.S
index a73139cd29..b8a2ea4259 100644
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -293,14 +293,15 @@  SECTIONS
        __bss_start = .;
        *(.bss.stack_aligned)
        *(.bss.page_aligned*)
-       *(.bss)
-       . = ALIGN(SMP_CACHE_BYTES);
        __per_cpu_start = .;
+       *(.bss.percpu.page_aligned)
        *(.bss.percpu)
        . = ALIGN(SMP_CACHE_BYTES);
        *(.bss.percpu.read_mostly)
        . = ALIGN(SMP_CACHE_BYTES);
        __per_cpu_data_end = .;
+       *(.bss)
+       . = ALIGN(SMP_CACHE_BYTES);
        __bss_end = .;
   } :text
   _end = . ;
diff --git a/xen/include/asm-arm/percpu.h b/xen/include/asm-arm/percpu.h
index 9584b830d4..264120b192 100644
--- a/xen/include/asm-arm/percpu.h
+++ b/xen/include/asm-arm/percpu.h
@@ -10,10 +10,8 @@  extern char __per_cpu_start[], __per_cpu_data_end[];
 extern unsigned long __per_cpu_offset[NR_CPUS];
 void percpu_init_areas(void);
 
-/* Separate out the type, so (int[3], foo) works. */
-#define __DEFINE_PER_CPU(type, name, suffix)                    \
-    __section(".bss.percpu" #suffix)                            \
-    __typeof__(type) per_cpu_##name
+#define __DEFINE_PER_CPU(attr, type, name) \
+    attr __typeof__(type) per_cpu_ ## name
 
 #define per_cpu(var, cpu)  \
     (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
diff --git a/xen/include/asm-x86/percpu.h b/xen/include/asm-x86/percpu.h
index ff34dc7897..5b6cef04c4 100644
--- a/xen/include/asm-x86/percpu.h
+++ b/xen/include/asm-x86/percpu.h
@@ -7,10 +7,8 @@  extern unsigned long __per_cpu_offset[NR_CPUS];
 void percpu_init_areas(void);
 #endif
 
-/* Separate out the type, so (int[3], foo) works. */
-#define __DEFINE_PER_CPU(type, name, suffix)                    \
-    __section(".bss.percpu" #suffix)                            \
-    __typeof__(type) per_cpu_##name
+#define __DEFINE_PER_CPU(attr, type, name) \
+    attr __typeof__(type) per_cpu_ ## name
 
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu)  \
diff --git a/xen/include/xen/percpu.h b/xen/include/xen/percpu.h
index aeec5c19d6..71a31cc361 100644
--- a/xen/include/xen/percpu.h
+++ b/xen/include/xen/percpu.h
@@ -9,9 +9,15 @@ 
  * The _##name concatenation is being used here to prevent 'name' from getting
  * macro expanded, while still allowing a per-architecture symbol name prefix.
  */
-#define DEFINE_PER_CPU(type, name) __DEFINE_PER_CPU(type, _##name, )
+#define DEFINE_PER_CPU(type, name) \
+    __DEFINE_PER_CPU(__section(".bss.percpu"), type, _ ## name)
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+    __DEFINE_PER_CPU(__section(".bss.percpu.page_aligned") \
+                     __aligned(PAGE_SIZE), type, _ ## name)
+
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
-	__DEFINE_PER_CPU(type, _##name, .read_mostly)
+    __DEFINE_PER_CPU(__section(".bss.percpu.read_mostly"), type, _ ## name)
 
 #define get_per_cpu_var(var)  (per_cpu__##var)
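
For reference, the BSS layout after this patch (sketch, x86; alignment
notes in parentheses):

    __bss_start
        .bss.stack_aligned
        .bss.page_aligned*        (PAGE_SIZE via input sections)
    __per_cpu_start
        .bss.percpu.page_aligned  (each object __aligned(PAGE_SIZE))
        .bss.percpu
        .bss.percpu.read_mostly   (after ALIGN(SMP_CACHE_BYTES))
    __per_cpu_data_end            (after ALIGN(SMP_CACHE_BYTES))
        .bss
    __bss_end                     (after ALIGN(SMP_CACHE_BYTES))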