author    Nicolai Hähnle <nicolai.haehnle@amd.com>    2017-10-22 17:38:46 +0200
committer Nicolai Hähnle <nicolai.haehnle@amd.com>    2017-11-09 13:58:10 +0100
commit    e3a8013de8ca854d21225be00f123ccf63f9060f (patch)
tree      b0163adc1cbfe8ac52e7c98987ebd30d1d4e8f25 /src/util
parent    f1a364878431c8c5f4fd38b40b9766449e49f552 (diff)
util/u_queue: add util_queue_fence_wait_timeout
v2: - style fixes
    - fix missing timeout handling in futex path

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Diffstat (limited to 'src/util')
-rw-r--r--  src/util/futex.h        9
-rw-r--r--  src/util/simple_mtx.h   2
-rw-r--r--  src/util/u_queue.c     82
-rw-r--r--  src/util/u_queue.h     54

4 files changed, 121 insertions(+), 26 deletions(-)
diff --git a/src/util/futex.h b/src/util/futex.h
index 722cdd35f6..4402893069 100644
--- a/src/util/futex.h
+++ b/src/util/futex.h
@@ -33,7 +33,7 @@
#include <sys/syscall.h>
#include <sys/time.h>
-static inline long sys_futex(void *addr1, int op, int val1, struct timespec *timeout, void *addr2, int val3)
+static inline long sys_futex(void *addr1, int op, int val1, const struct timespec *timeout, void *addr2, int val3)
{
return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
}
@@ -43,9 +43,12 @@ static inline int futex_wake(uint32_t *addr, int count)
return sys_futex(addr, FUTEX_WAKE, count, NULL, NULL, 0);
}
-static inline int futex_wait(uint32_t *addr, int32_t value)
+static inline int futex_wait(uint32_t *addr, int32_t value, const struct timespec *timeout)
{
- return sys_futex(addr, FUTEX_WAIT, value, NULL, NULL, 0);
+ /* FUTEX_WAIT_BITSET with FUTEX_BITSET_MATCH_ANY is equivalent to
+ * FUTEX_WAIT, except that it treats the timeout as absolute. */
+ return sys_futex(addr, FUTEX_WAIT_BITSET, value, timeout, NULL,
+ FUTEX_BITSET_MATCH_ANY);
}
#endif
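
The FUTEX_WAIT_BITSET + FUTEX_BITSET_MATCH_ANY combination used above takes an absolute deadline (by default against CLOCK_MONOTONIC) instead of FUTEX_WAIT's relative timeout. The standalone sketch below is illustrative only and not part of the patch; it redefines sys_futex locally so it is self-contained, and it assumes Linux with glibc.

#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Local copy of the raw futex wrapper, for the sake of a self-contained example. */
static long sys_futex(void *addr1, int op, int val1,
                      const struct timespec *timeout, void *addr2, int val3)
{
   return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
}

int main(void)
{
   uint32_t word = 1; /* never signalled, so the wait must time out */

   /* Build an absolute CLOCK_MONOTONIC deadline 100 ms from now;
    * FUTEX_WAIT_BITSET interprets the timespec as an absolute time. */
   struct timespec ts;
   clock_gettime(CLOCK_MONOTONIC, &ts);
   ts.tv_nsec += 100 * 1000 * 1000;
   if (ts.tv_nsec >= 1000 * 1000 * 1000) {
      ts.tv_sec++;
      ts.tv_nsec -= 1000 * 1000 * 1000;
   }

   long r = sys_futex(&word, FUTEX_WAIT_BITSET, 1, &ts, NULL,
                      FUTEX_BITSET_MATCH_ANY);
   if (r < 0 && errno == ETIMEDOUT)
      printf("futex wait timed out as expected\n");
   return 0;
}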
diff --git a/src/util/simple_mtx.h b/src/util/simple_mtx.h
index 9f9e40861f..cd24b6f9eb 100644
--- a/src/util/simple_mtx.h
+++ b/src/util/simple_mtx.h
@@ -83,7 +83,7 @@ simple_mtx_lock(simple_mtx_t *mtx)
if (c != 2)
c = __sync_lock_test_and_set(&mtx->val, 2);
while (c != 0) {
- futex_wait(&mtx->val, 2);
+ futex_wait(&mtx->val, 2, NULL);
c = __sync_lock_test_and_set(&mtx->val, 2);
}
}
diff --git a/src/util/u_queue.c b/src/util/u_queue.c
index 706ee8b04d..43c28ac6ef 100644
--- a/src/util/u_queue.c
+++ b/src/util/u_queue.c
@@ -26,6 +26,9 @@
#include "u_queue.h"
+#include <time.h>
+
+#include "util/os_time.h"
#include "util/u_string.h"
#include "util/u_thread.h"
@@ -91,6 +94,50 @@ remove_from_atexit_list(struct util_queue *queue)
* util_queue_fence
*/
+#ifdef UTIL_QUEUE_FENCE_FUTEX
+static bool
+do_futex_fence_wait(struct util_queue_fence *fence,
+ bool timeout, int64_t abs_timeout)
+{
+ uint32_t v = fence->val;
+ struct timespec ts;
+ ts.tv_sec = abs_timeout / (1000*1000*1000);
+ ts.tv_nsec = abs_timeout % (1000*1000*1000);
+
+ while (v != 0) {
+ if (v != 2) {
+ v = p_atomic_cmpxchg(&fence->val, 1, 2);
+ if (v == 0)
+ return true;
+ }
+
+ int r = futex_wait(&fence->val, 2, timeout ? &ts : NULL);
+ if (timeout && r < 0) {
+ if (errno == ETIMEDOUT)
+ return false;
+ }
+
+ v = fence->val;
+ }
+
+ return true;
+}
+
+void
+_util_queue_fence_wait(struct util_queue_fence *fence)
+{
+ do_futex_fence_wait(fence, false, 0);
+}
+
+bool
+_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
+ int64_t abs_timeout)
+{
+ return do_futex_fence_wait(fence, true, abs_timeout);
+}
+
+#endif
+
#ifdef UTIL_QUEUE_FENCE_STANDARD
void
util_queue_fence_signal(struct util_queue_fence *fence)
@@ -102,7 +149,7 @@ util_queue_fence_signal(struct util_queue_fence *fence)
}
void
-util_queue_fence_wait(struct util_queue_fence *fence)
+_util_queue_fence_wait(struct util_queue_fence *fence)
{
mtx_lock(&fence->mutex);
while (!fence->signalled)
@@ -110,6 +157,39 @@ util_queue_fence_wait(struct util_queue_fence *fence)
mtx_unlock(&fence->mutex);
}
+bool
+_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
+ int64_t abs_timeout)
+{
+ /* This terrible hack is made necessary by the fact that we really want an
+ * internal interface consistent with os_time_*, but cnd_timedwait is spec'd
+ * to be relative to the TIME_UTC clock.
+ */
+ int64_t rel = abs_timeout - os_time_get_nano();
+
+ if (rel > 0) {
+ struct timespec ts;
+
+ timespec_get(&ts, TIME_UTC);
+
+ ts.tv_sec += rel / (1000*1000*1000);
+ ts.tv_nsec += rel % (1000*1000*1000);
+ if (ts.tv_nsec >= (1000*1000*1000)) {
+ ts.tv_sec++;
+ ts.tv_nsec -= (1000*1000*1000);
+ }
+
+ mtx_lock(&fence->mutex);
+ while (!fence->signalled) {
+ if (cnd_timedwait(&fence->cond, &fence->mutex, &ts) != thrd_success)
+ break;
+ }
+ mtx_unlock(&fence->mutex);
+ }
+
+ return fence->signalled;
+}
+
void
util_queue_fence_init(struct util_queue_fence *fence)
{
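
The conversion performed in _util_queue_fence_wait_timeout above (remaining time on the os_time clock added onto a freshly sampled TIME_UTC timespec for cnd_timedwait) can be seen in isolation in the sketch below. It is not Mesa code: monotonic_now_ns() is a stand-in for os_time_get_nano(), which on Linux is backed by a monotonic clock.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Stand-in for os_time_get_nano(): current time in nanoseconds on a
 * monotonic clock. */
static int64_t monotonic_now_ns(void)
{
   struct timespec ts;
   clock_gettime(CLOCK_MONOTONIC, &ts);
   return (int64_t)ts.tv_sec * (1000*1000*1000) + ts.tv_nsec;
}

/* Convert an absolute deadline on the monotonic clock into the TIME_UTC
 * timespec that cnd_timedwait() expects: measure the remaining time and
 * add it to "now" on the UTC clock. Returns false if the deadline has
 * already passed. */
static bool monotonic_deadline_to_utc(int64_t abs_timeout, struct timespec *out)
{
   int64_t rel = abs_timeout - monotonic_now_ns();
   if (rel <= 0)
      return false;

   timespec_get(out, TIME_UTC);
   out->tv_sec += rel / (1000*1000*1000);
   out->tv_nsec += rel % (1000*1000*1000);
   if (out->tv_nsec >= (1000*1000*1000)) {
      out->tv_sec++;
      out->tv_nsec -= (1000*1000*1000);
   }
   return true;
}

As in the patch, the two clocks are sampled at slightly different instants, so the resulting deadline is only as precise as that gap; that imprecision is inherent to mapping one clock onto another.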
diff --git a/src/util/u_queue.h b/src/util/u_queue.h
index a54ec71011..ec02815748 100644
--- a/src/util/u_queue.h
+++ b/src/util/u_queue.h
@@ -81,26 +81,6 @@ util_queue_fence_destroy(struct util_queue_fence *fence)
}
static inline void
-util_queue_fence_wait(struct util_queue_fence *fence)
-{
- uint32_t v = fence->val;
-
- if (likely(v == 0))
- return;
-
- do {
- if (v != 2) {
- v = p_atomic_cmpxchg(&fence->val, 1, 2);
- if (v == 0)
- return;
- }
-
- futex_wait(&fence->val, 2);
- v = fence->val;
- } while(v != 0);
-}
-
-static inline void
util_queue_fence_signal(struct util_queue_fence *fence)
{
uint32_t val = p_atomic_xchg(&fence->val, 0);
@@ -147,7 +127,6 @@ struct util_queue_fence {
void util_queue_fence_init(struct util_queue_fence *fence);
void util_queue_fence_destroy(struct util_queue_fence *fence);
-void util_queue_fence_wait(struct util_queue_fence *fence);
void util_queue_fence_signal(struct util_queue_fence *fence);
/**
@@ -170,6 +149,39 @@ util_queue_fence_is_signalled(struct util_queue_fence *fence)
}
#endif
+void
+_util_queue_fence_wait(struct util_queue_fence *fence);
+
+static inline void
+util_queue_fence_wait(struct util_queue_fence *fence)
+{
+ if (unlikely(!util_queue_fence_is_signalled(fence)))
+ _util_queue_fence_wait(fence);
+}
+
+bool
+_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
+ int64_t abs_timeout);
+
+/**
+ * Wait for the fence to be signaled with a timeout.
+ *
+ * \param fence the fence
+ * \param abs_timeout the absolute timeout in nanoseconds, relative to the
+ * clock provided by os_time_get_nano.
+ *
+ * \return true if the fence was signaled, false if the timeout occurred.
+ */
+static inline bool
+util_queue_fence_wait_timeout(struct util_queue_fence *fence,
+ int64_t abs_timeout)
+{
+ if (util_queue_fence_is_signalled(fence))
+ return true;
+
+ return _util_queue_fence_wait_timeout(fence, abs_timeout);
+}
+
typedef void (*util_queue_execute_func)(void *job, int thread_index);
struct util_queue_job {
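
For reference, a hypothetical caller of the new interface might look like the sketch below. It is not part of this patch; wait_for_job and timeout_ns are made-up names, and the absolute deadline is derived from os_time_get_nano(), the clock the documentation above refers to.

#include "util/os_time.h"
#include "util/u_queue.h"

/* Hypothetical helper: wait at most timeout_ns nanoseconds for a fence. */
static bool wait_for_job(struct util_queue_fence *fence, uint64_t timeout_ns)
{
   /* util_queue_fence_wait_timeout takes an absolute deadline on the
    * os_time_get_nano() clock, so convert the relative timeout here. */
   int64_t abs_timeout = os_time_get_nano() + (int64_t)timeout_ns;

   return util_queue_fence_wait_timeout(fence, abs_timeout);
}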