about summary refs log tree commit diff
diff options
context:
space:
mode:
authorBill Fischofer <bill.fischofer@linaro.org>2015-09-03 10:16:39 -0500
committerMaxim Uvarov <maxim.uvarov@linaro.org>2015-09-03 19:29:34 +0300
commit5cdd039a6bcecae79d1fad0f2b4d52ad43c8e6d5 (patch)
tree0a00fe4486ea8cd4f9f0a314e4e9e8e850cc8257
parent9c3fcc0e5c11cf8117672f63a346b80a66d72103 (diff)
linux-generic: schedule: fix race condition in ordered locks
Correct race condition that arises due to attempt to permit ordered locks to be reusable. For now, ordered locks can only be used once per scheduled event.

Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
Reviewed-and-Tested-by: Maxim Uvarov <maxim.uvarov@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
-rw-r--r--platform/linux-generic/include/odp_queue_internal.h6
-rw-r--r--platform/linux-generic/odp_queue.c8
2 files changed, 6 insertions, 8 deletions
diff --git a/platform/linux-generic/include/odp_queue_internal.h b/platform/linux-generic/include/odp_queue_internal.h
index 48576bc..0f30965 100644
--- a/platform/linux-generic/include/odp_queue_internal.h
+++ b/platform/linux-generic/include/odp_queue_internal.h
@@ -193,8 +193,12 @@ static inline void reorder_enq(queue_entry_t *queue,
static inline void order_release(queue_entry_t *origin_qe, int count)
{
+ uint64_t sync = odp_atomic_load_u64(&origin_qe->s.sync_out);
+
origin_qe->s.order_out += count;
- odp_atomic_fetch_add_u64(&origin_qe->s.sync_out, count);
+ if (sync < origin_qe->s.order_out)
+ odp_atomic_fetch_add_u64(&origin_qe->s.sync_out,
+ origin_qe->s.order_out - sync);
}
static inline int reorder_deq(queue_entry_t *queue,
diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
index 15abd93..ac933da 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -999,12 +999,6 @@ void odp_schedule_order_unlock(odp_schedule_order_lock_t *lock ODP_UNUSED)
if (!origin_qe)
return;
- /* Get a new sync order for reusability, and release the lock. Note
- * that this must be done in this sequence to prevent race conditions
- * where the next waiter could lock and unlock before we're able to
- * get a new sync order since that would cause order inversion on
- * subsequent locks we may perform in this ordered context.
- */
- *sync = odp_atomic_fetch_inc_u64(&origin_qe->s.sync_in);
+ /* Release the ordered lock */
odp_atomic_fetch_inc_u64(&origin_qe->s.sync_out);
}