summaryrefslogtreecommitdiff
path: root/libatomic
diff options
context:
space:
mode:
authorTamar Christina <tamar.christina@arm.com>2022-08-08 14:37:42 +0100
committerTamar Christina <tamar.christina@arm.com>2022-08-08 14:37:42 +0100
commit5471f55f001af412e1125b04972ebaab9d4f7337 (patch)
treed9e8fe8daf896f3984fda1914d521d51aa7fbfff /libatomic
parente6a8ae900b4141bbce1451da8f173d441662782d (diff)
Similar to AArch64 the Arm implementation of 128-bit atomics is broken. For 128-bit atomics we rely on pthread barriers to correctly guard the address in the pointer to get correct memory ordering. However for 128-bit atomics the address under the lock is different from the original pointer. This means that one of the values under the atomic operation is not protected properly and so we fail when the user has requested sequential consistency as there's no barrier to enforce this requirement. As such users have resorted to adding an #ifdef GCC <emit barrier> #endif around the use of these atomics. This corrects the issue by issuing a barrier only when __ATOMIC_SEQ_CST was requested. I have hand verified that the barriers are inserted for atomic seq cst. libatomic/ChangeLog: PR target/102218 * config/arm/host-config.h (pre_seq_barrier, post_seq_barrier, pre_post_seq_barrier): Require barrier on __ATOMIC_SEQ_CST.
Diffstat (limited to 'libatomic')
-rw-r--r--libatomic/config/arm/host-config.h19
1 files changed, 19 insertions, 0 deletions
diff --git a/libatomic/config/arm/host-config.h b/libatomic/config/arm/host-config.h
index bbf4a3f84c3..ef16fad2a35 100644
--- a/libatomic/config/arm/host-config.h
+++ b/libatomic/config/arm/host-config.h
@@ -1,4 +1,23 @@
/* Avoiding the DMB (or kernel helper) can be a good thing. */
#define WANT_SPECIALCASE_RELAXED
+/* Glibc, at least, uses acq_rel in its pthread mutex
+ implementation. If the user is asking for seq_cst,
+ this is insufficient. */
+
+/* Fence emitted before the lock-protected 128-bit operation.  The
+   pthread lock only gives acq_rel ordering, so a full fence is added
+   here, but only when the caller asked for __ATOMIC_SEQ_CST; weaker
+   memory models need nothing extra.  */
+static inline void __attribute__((always_inline, artificial))
+pre_seq_barrier(int model)
+{
+ if (model == __ATOMIC_SEQ_CST)
+ __atomic_thread_fence (__ATOMIC_SEQ_CST);
+}
+
+/* Fence emitted after the lock-protected 128-bit operation.  The
+   required fence is identical to the pre-operation one, so simply
+   delegate to pre_seq_barrier.  */
+static inline void __attribute__((always_inline, artificial))
+post_seq_barrier(int model)
+{
+ pre_seq_barrier(model);
+}
+
+#define pre_post_seq_barrier 1
+
#include_next <host-config.h>