author     Jonathan Wakely <jwakely.gcc@gmail.com>	2012-10-09 08:16:13 +0000
committer  Jonathan Wakely <redi@gcc.gnu.org>	2012-10-09 09:16:13 +0100
commit     8024199176f5092a9248acf99b88bbe890406bb9 (patch)
tree       87cbb09e7fd23d73d39e84854a1fad151118a777 /libstdc++-v3/include/parallel
parent     eef26c05fb7c3a758b48b8bf873390623fa3d747 (diff)
re PR libstdc++/54754 ([parallel mode] 'make check-parallel' only works on x86-64)
	PR libstdc++/54754
	* include/parallel/compatibility.h: Use atomic built-ins when they
	are lock-free.

From-SVN: r192240
Diffstat (limited to 'libstdc++-v3/include/parallel')
-rw-r--r--  libstdc++-v3/include/parallel/compatibility.h  170
1 file changed, 43 insertions, 127 deletions
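The change replaces the per-width helpers (__fetch_and_add_32/_64 and __compare_and_swap_32/_64), which hard-coded x86 target macros, with templates that call the __atomic built-ins whenever __atomic_always_lock_free reports the operation as lock-free for the operand size, and otherwise fall back to an OpenMP critical section. Below is a minimal, self-contained sketch of that pattern, not the libstdc++ code itself; the name fetch_and_add and the critical-section label are invented for illustration.

// Sketch of the lock-free-or-critical-section pattern adopted by the patch.
// Hypothetical names; compile with g++ -fopenmp.
#include <cstdint>
#include <cstdio>

template<typename T>
  inline T
  fetch_and_add(volatile T* ptr, T addend)
  {
    // Compile-time check: can this operand size use native atomics?
    if (__atomic_always_lock_free(sizeof(T), ptr))
      return __atomic_fetch_add(ptr, addend, __ATOMIC_ACQ_REL);

    // Portable fallback: serialize through a named OpenMP critical section.
    T old;
#pragma omp critical (fetch_and_add_fallback)
    {
      old = *ptr;
      *ptr += addend;
    }
    return old;
  }

int main()
{
  volatile int64_t counter = 0;
  fetch_and_add(&counter, int64_t(5));
  std::printf("%lld\n", (long long)counter);   // prints 5
}

Because __atomic_always_lock_free folds to a compile-time constant, the untaken branch disappears: targets with native 64-bit atomics keep the fast path, while everything else gets a correct, if slower, fallback instead of the previous x86-only #if ladder.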
diff --git a/libstdc++-v3/include/parallel/compatibility.h b/libstdc++-v3/include/parallel/compatibility.h
index 03506d84de3..a58e65fe60c 100644
--- a/libstdc++-v3/include/parallel/compatibility.h
+++ b/libstdc++-v3/include/parallel/compatibility.h
@@ -51,154 +51,70 @@ __attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long);
namespace __gnu_parallel
{
- // These atomic functions only work on integers
-
- /** @brief Add a value to a variable, atomically.
- *
- * Implementation is heavily platform-dependent.
- * @param __ptr Pointer to a 32-bit signed integer.
- * @param __addend Value to add.
- */
- inline int32_t
- __fetch_and_add_32(volatile int32_t* __ptr, int32_t __addend)
- {
- return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
- }
-
- /** @brief Add a value to a variable, atomically.
- *
- * Implementation is heavily platform-dependent.
- * @param __ptr Pointer to a 64-bit signed integer.
- * @param __addend Value to add.
- */
- inline int64_t
- __fetch_and_add_64(volatile int64_t* __ptr, int64_t __addend)
- {
-#if defined(__x86_64)
- return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
-#elif defined(__i386) && \
- (defined(__i686) || defined(__pentium4) || defined(__athlon) \
- || defined(__k8) || defined(__core2))
- return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
-#else //fallback, slow
-#if defined(__i386)
- // XXX doesn'__t work with -march=native
- //#warning "please compile with -march=i686 or better"
-#endif
-#pragma message("slow __fetch_and_add_64")
- int64_t __res;
-#pragma omp critical
+ template<typename _Tp>
+ inline _Tp
+ __add_omp(volatile _Tp* __ptr, _Tp __addend)
{
- __res = *__ptr;
- *(__ptr) += __addend;
+ int64_t __res;
+#pragma omp critical
+ {
+ __res = *__ptr;
+ *(__ptr) += __addend;
+ }
+ return __res;
}
- return __res;
-#endif
- }
/** @brief Add a value to a variable, atomically.
*
- * Implementation is heavily platform-dependent.
* @param __ptr Pointer to a signed integer.
* @param __addend Value to add.
*/
template<typename _Tp>
- inline _Tp
- __fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
- {
- if (sizeof(_Tp) == sizeof(int32_t))
- return
- (_Tp)__fetch_and_add_32((volatile int32_t*) __ptr, (int32_t)__addend);
- else if (sizeof(_Tp) == sizeof(int64_t))
- return
- (_Tp)__fetch_and_add_64((volatile int64_t*) __ptr, (int64_t)__addend);
- else
- _GLIBCXX_PARALLEL_ASSERT(false);
- }
-
- /** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
- * *__ptr=__replacement and return @c true, return @c false otherwise.
- *
- * Implementation is heavily platform-dependent.
- * @param __ptr Pointer to 32-bit signed integer.
- * @param __comparand Compare value.
- * @param __replacement Replacement value.
- */
- inline bool
- __compare_and_swap_32(volatile int32_t* __ptr, int32_t __comparand,
- int32_t __replacement)
- {
- return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
- false, __ATOMIC_ACQ_REL,
- __ATOMIC_RELAXED);
- }
+ inline _Tp
+ __fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
+ {
+ if (__atomic_always_lock_free(sizeof(_Tp), __ptr))
+ return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
+ return __add_omp(__ptr, __addend);
+ }
- /** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
- * *__ptr=__replacement and return @c true, return @c false otherwise.
- *
- * Implementation is heavily platform-dependent.
- * @param __ptr Pointer to 64-bit signed integer.
- * @param __comparand Compare value.
- * @param __replacement Replacement value.
- */
- inline bool
- __compare_and_swap_64(volatile int64_t* __ptr, int64_t __comparand,
- int64_t __replacement)
- {
-#if defined(__x86_64)
- return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
- false, __ATOMIC_ACQ_REL,
- __ATOMIC_RELAXED);
-#elif defined(__i386) && \
- (defined(__i686) || defined(__pentium4) || defined(__athlon) \
- || defined(__k8) || defined(__core2))
- return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
- false, __ATOMIC_ACQ_REL,
- __ATOMIC_RELAXED);
-#else
-#if defined(__i386)
- // XXX -march=native
- //#warning "please compile with -march=i686 or better"
-#endif
-#pragma message("slow __compare_and_swap_64")
- bool __res = false;
-#pragma omp critical
+ template<typename _Tp>
+ inline bool
+ __cas_omp(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
{
- if (*__ptr == __comparand)
- {
- *__ptr = __replacement;
- __res = true;
- }
+ bool __res = false;
+#pragma omp critical
+ {
+ if (*__ptr == __comparand)
+ {
+ *__ptr = __replacement;
+ __res = true;
+ }
+ }
+ return __res;
}
- return __res;
-#endif
- }
- /** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
+ /** @brief Compare-and-swap
+ *
+ * Compare @c *__ptr and @c __comparand. If equal, let @c
* *__ptr=__replacement and return @c true, return @c false otherwise.
*
- * Implementation is heavily platform-dependent.
* @param __ptr Pointer to signed integer.
* @param __comparand Compare value.
* @param __replacement Replacement value.
*/
template<typename _Tp>
- inline bool
- __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
- {
- if (sizeof(_Tp) == sizeof(int32_t))
- return __compare_and_swap_32((volatile int32_t*) __ptr,
- (int32_t)__comparand,
- (int32_t)__replacement);
- else if (sizeof(_Tp) == sizeof(int64_t))
- return __compare_and_swap_64((volatile int64_t*) __ptr,
- (int64_t)__comparand,
- (int64_t)__replacement);
- else
- _GLIBCXX_PARALLEL_ASSERT(false);
- }
+ inline bool
+ __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
+ {
+ if (__atomic_always_lock_free(sizeof(_Tp), __ptr))
+ return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
+ false, __ATOMIC_ACQ_REL,
+ __ATOMIC_RELAXED);
+ return __cas_omp(__ptr, __comparand, __replacement);
+ }
- /** @brief Yield the control to another thread, without waiting for
+ /** @brief Yield control to another thread, without waiting for
* the end of the time slice.
*/
inline void