[v1,2/2] include/qemu/atomic: add compile time asserts

Message ID 1458577386-9984-3-git-send-email-alex.bennee@linaro.org (mailing list archive)
State New, archived

Commit Message

Alex Bennée March 21, 2016, 4:23 p.m. UTC
To be safely portable, no atomic access should be wider than the
natural word width of the host. The most common abuse is trying to
atomically access 64-bit values on a 32-bit host.

This patch adds QEMU_BUILD_BUG_ON checks to the __atomic intrinsic paths
to create a build failure if (sizeof(*ptr) > sizeof(void *)).

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
 include/qemu/atomic.h | 58 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 34 insertions(+), 24 deletions(-)
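
As an illustration of the failure mode these asserts catch (not part of
the patch; the variable and function names below are hypothetical),
consider a 64-bit access on a 32-bit host:

    #include "qemu/atomic.h"
    #include <stdint.h>

    static uint64_t shared_counter;

    uint64_t read_counter(void)
    {
        /* On a 32-bit host sizeof(void *) == 4 while sizeof(*ptr) == 8,
         * so QEMU_BUILD_BUG_ON fires and the build fails here. Before
         * this patch the code compiled and the 64-bit load could tear
         * at runtime. */
        return atomic_read(&shared_counter);
    }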

Comments

Alex Bennée April 1, 2016, 3:03 p.m. UTC | #1
Alex Bennée <alex.bennee@linaro.org> writes:

> To be safely portable, no atomic access should be wider than the
> natural word width of the host. The most common abuse is trying to
> atomically access 64-bit values on a 32-bit host.
>
> This patch adds QEMU_BUILD_BUG_ON checks to the __atomic intrinsic paths
> to create a build failure if (sizeof(*ptr) > sizeof(void *)).
>
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>

Ping Paolo. Is this worth including in a re-spin?

--
Alex Bennée
Paolo Bonzini April 4, 2016, 8:33 a.m. UTC | #2
On 01/04/2016 17:03, Alex Bennée wrote:
> 
>> > To be safely portable, no atomic access should be wider than the
>> > natural word width of the host. The most common abuse is trying to
>> > atomically access 64-bit values on a 32-bit host.
>> >
>> > This patch adds QEMU_BUILD_BUG_ON checks to the __atomic intrinsic paths
>> > to create a build failure if (sizeof(*ptr) > sizeof(void *)).
>> >
>> > Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> 
> Ping Paolo. Is this worth including in a re-spin?

Yes, please.

Patch

diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 8f1d8d9..5bc4d6c 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -42,30 +42,34 @@ 
  * loads/stores past the atomic operation load/store. However there is
  * no explicit memory barrier for the processor.
  */
-#define atomic_read(ptr)                          \
-    ({                                            \
-    typeof(*ptr) _val;                            \
-     __atomic_load(ptr, &_val, __ATOMIC_RELAXED); \
-    _val;                                         \
+#define atomic_read(ptr)                              \
+    ({                                                \
+    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+    typeof(*ptr) _val;                                \
+     __atomic_load(ptr, &_val, __ATOMIC_RELAXED);     \
+    _val;                                             \
     })
 
-#define atomic_set(ptr, i)  do {                  \
-    typeof(*ptr) _val = (i);                      \
-    __atomic_store(ptr, &_val, __ATOMIC_RELAXED); \
+#define atomic_set(ptr, i)  do {                      \
+    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+    typeof(*ptr) _val = (i);                          \
+    __atomic_store(ptr, &_val, __ATOMIC_RELAXED);     \
 } while(0)
 
 /* Atomic RCU operations imply weak memory barriers */
 
-#define atomic_rcu_read(ptr)                      \
-    ({                                            \
-    typeof(*ptr) _val;                            \
-     __atomic_load(ptr, &_val, __ATOMIC_CONSUME); \
-    _val;                                         \
+#define atomic_rcu_read(ptr)                          \
+    ({                                                \
+    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+    typeof(*ptr) _val;                                \
+    __atomic_load(ptr, &_val, __ATOMIC_CONSUME);      \
+    _val;                                             \
     })
 
-#define atomic_rcu_set(ptr, i)  do {                    \
-    typeof(*ptr) _val = (i);                            \
-    __atomic_store(ptr, &_val, __ATOMIC_RELEASE);       \
+#define atomic_rcu_set(ptr, i) do {                   \
+    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
+    typeof(*ptr) _val = (i);                          \
+    __atomic_store(ptr, &_val, __ATOMIC_RELEASE);     \
 } while(0)
 
 /* atomic_mb_read/set semantics map Java volatile variables. They are
@@ -79,6 +83,7 @@ 
 #if defined(_ARCH_PPC)
 #define atomic_mb_read(ptr)                             \
     ({                                                  \
+    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));   \
     typeof(*ptr) _val;                                  \
      __atomic_load(ptr, &_val, __ATOMIC_RELAXED);       \
      smp_rmb();                                         \
@@ -86,22 +91,25 @@ 
     })
 
 #define atomic_mb_set(ptr, i)  do {                     \
+    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));   \
     typeof(*ptr) _val = (i);                            \
     smp_wmb();                                          \
     __atomic_store(ptr, &_val, __ATOMIC_RELAXED);       \
     smp_mb();                                           \
 } while(0)
 #else
-#define atomic_mb_read(ptr)                       \
-    ({                                            \
-    typeof(*ptr) _val;                            \
-     __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST); \
-    _val;                                         \
+#define atomic_mb_read(ptr)                             \
+    ({                                                  \
+    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));   \
+    typeof(*ptr) _val;                                  \
+    __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST);        \
+    _val;                                               \
     })
 
-#define atomic_mb_set(ptr, i)  do {               \
-    typeof(*ptr) _val = (i);                      \
-    __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST); \
+#define atomic_mb_set(ptr, i)  do {                     \
+    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));   \
+    typeof(*ptr) _val = (i);                            \
+    __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST);       \
 } while(0)
 #endif
 
@@ -109,6 +117,7 @@ 
 /* All the remaining operations are fully sequentially consistent */
 
 #define atomic_xchg(ptr, i)    ({                           \
+    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));       \
     typeof(*ptr) _new = (i), _old;                          \
     __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
     _old;                                                   \
@@ -117,6 +126,7 @@ 
 /* Returns the eventual value, failed or not */
 #define atomic_cmpxchg(ptr, old, new)                                   \
     ({                                                                  \
+    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));                   \
     typeof(*ptr) _old = (old), _new = (new);                            \
     __atomic_compare_exchange(ptr, &_old, &_new, false,                 \
                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);      \
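
For reference, QEMU_BUILD_BUG_ON turns a compile-time-constant condition
into a build error. A minimal sketch of the classic technique, assuming
the negative-array-size trick (QEMU's actual definition lives in
include/qemu/compiler.h and may differ):

    /* Sketch only: produces a compile error when cond is true, because
     * the array type then has a negative size; when cond is false it is
     * a harmless (void)sizeof expression. */
    #define BUILD_BUG_ON_SKETCH(cond) \
        ((void)sizeof(char[1 - 2 * !!(cond)]))

    /* Used as a statement, as in the macros above:
     * BUILD_BUG_ON_SKETCH(sizeof(*ptr) > sizeof(void *));
     */

Because it is an expression statement, a check like this can sit at the
top of a statement expression ({ ... }) or a do { ... } while (0) block,
which is exactly where the patch places the asserts.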