[v7,1/5] xen: introduce smp_mb__[after|before]_atomic() barriers

Message ID 20200325105511.20882-2-jgross@suse.com (mailing list archive)
State Superseded
Series xen/rcu: let rcu work better with core scheduling

Commit Message

Jürgen Groß March 25, 2020, 10:55 a.m. UTC
When using atomic variables for synchronization, barriers are needed
to ensure proper data serialization. Introduce smp_mb__before_atomic()
and smp_mb__after_atomic(), as in the Linux kernel, for that purpose.

Use the same definitions as in the Linux kernel.

Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V7:
- new patch
---
 xen/include/asm-arm/system.h | 3 +++
 xen/include/asm-x86/system.h | 3 +++
 2 files changed, 6 insertions(+)
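
For context, these helpers pair a full barrier with an atomic
read-modify-write operation that does not itself imply ordering. A
minimal sketch of the usage pattern they enable (illustrative only;
obj, dead and refcnt are hypothetical names, not taken from this
series):

    /* Writer: the plain store must be visible to any CPU that
     * observes the decremented reference count. */
    obj->dead = 1;               /* plain store */
    smp_mb__before_atomic();     /* full barrier before the RMW */
    atomic_dec(&obj->refcnt);    /* void atomic RMW; carries no
                                    implied barrier on e.g. Arm */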

Comments

Jan Beulich March 25, 2020, 1:17 p.m. UTC | #1
On 25.03.2020 11:55, Juergen Gross wrote:
> When using atomic variables for synchronization, barriers are needed
> to ensure proper data serialization. Introduce smp_mb__before_atomic()
> and smp_mb__after_atomic(), as in the Linux kernel, for that purpose.
> 
> Use the same definitions as in the Linux kernel.
> 
> Suggested-by: Jan Beulich <jbeulich@suse.com>
> Signed-off-by: Juergen Gross <jgross@suse.com>

Acked-by: Jan Beulich <jbeulich@suse.com>

Julien Grall March 25, 2020, 4:20 p.m. UTC | #2
Hi Juergen,

On 25/03/2020 10:55, Juergen Gross wrote:
> When using atomic variables for synchronization, barriers are needed
> to ensure proper data serialization. Introduce smp_mb__before_atomic()
> and smp_mb__after_atomic(), as in the Linux kernel, for that purpose.
> 
> Use the same definitions as in the Linux kernel.
> 
> Suggested-by: Jan Beulich <jbeulich@suse.com>
> Signed-off-by: Juergen Gross <jgross@suse.com>

Acked-by: Julien Grall <jgrall@amazon.com>

Cheers,

Patch

diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
index e5d062667d..65d5c8e423 100644
--- a/xen/include/asm-arm/system.h
+++ b/xen/include/asm-arm/system.h
@@ -30,6 +30,9 @@
 
 #define smp_wmb()       dmb(ishst)
 
+#define smp_mb__before_atomic()    smp_mb()
+#define smp_mb__after_atomic()     smp_mb()
+
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences.  Apparently we can't trust
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 069f422f0d..7e5891f3df 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -233,6 +233,9 @@ static always_inline unsigned long __xadd(
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
 
+#define smp_mb__before_atomic()    do { } while (0)
+#define smp_mb__after_atomic()     do { } while (0)
+
 /**
  * array_index_mask_nospec() - generate a mask that is ~0UL when the
  *      bounds check succeeds and 0 otherwise
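
The per-architecture definitions differ for a reason worth noting: on
x86 every atomic read-modify-write instruction uses the LOCK prefix,
which is fully serializing, so the macros can expand to nothing (the
Linux kernel uses the same empty definitions there); on Arm the atomic
operations carry no such guarantee, so a real smp_mb(), i.e. a
dmb(ish), is required. A matching sketch for the smp_mb__after_atomic()
direction (again illustrative; cpu, pending and all_cpus_done() are
hypothetical names):

    /* Reader side of a completion protocol: the atomic clear must be
     * ordered before the subsequent check. Without the barrier, the
     * load of pending could be reordered before the clear, and two
     * CPUs finishing at the same time could each see the other's bit
     * still set, so that neither calls all_cpus_done(). */
    clear_bit(cpu, &pending);    /* void atomic RMW */
    smp_mb__after_atomic();      /* full barrier after the RMW */
    if ( pending == 0 )          /* plain load; must not move up */
        all_cpus_done();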