Diffstat (limited to 'platform/linux-generic/odp_schedule_scalable.c')
-rw-r--r--  platform/linux-generic/odp_schedule_scalable.c  61
1 file changed, 15 insertions, 46 deletions
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c
index 6d60c048f..5166fb6d0 100644
--- a/platform/linux-generic/odp_schedule_scalable.c
+++ b/platform/linux-generic/odp_schedule_scalable.c
@@ -223,13 +223,9 @@ void _odp_sched_update_enq(sched_elem_t *q, uint32_t actual)
if (odp_unlikely(ticket != TICKET_INVALID)) {
/* Wait for our turn to update schedq. */
if (odp_unlikely(__atomic_load_n(&q->qschst.cur_ticket,
- __ATOMIC_ACQUIRE) != ticket)) {
- sevl();
- while (wfe() &&
- monitor8(&q->qschst.cur_ticket,
- __ATOMIC_ACQUIRE) != ticket)
- odp_cpu_pause();
- }
+ __ATOMIC_ACQUIRE) != ticket))
+ _odp_wait_until_eq_acq_u8(&q->qschst.cur_ticket, ticket);
+
/* Enqueue at end of scheduler queue */
/* We are here because of empty-to-non-empty transition
* This means queue must be pushed to schedq if possible
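The new helper is defined outside this file, so it does not appear in this diff. Judging from the loop it replaces, a minimal sketch of the u8 variant, assuming it simply wraps the removed sevl()/wfe()/monitor8() pattern (the signature and body below are reconstructions, not the actual ODP header):

    /* Hedged sketch: wraps the event-stream wait loop removed above.
     * sevl(), wfe(), monitor8() and odp_cpu_pause() are the existing
     * primitives from the old code; the signature is assumed. On builds
     * where wfe() is a no-op this degrades to a monitor/pause poll loop. */
    static inline void _odp_wait_until_eq_acq_u8(uint8_t *addr, uint8_t expected)
    {
            sevl(); /* Arm the event stream before the first wfe() */
            while (wfe() && monitor8(addr, __ATOMIC_ACQUIRE) != expected)
                    odp_cpu_pause();
    }

Note that the callers still do a plain __atomic_load_n() first and only enter the helper on the odp_unlikely() contended path, so the uncontended case stays cheap.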
@@ -366,13 +362,9 @@ sched_update_deq(sched_elem_t *q,
_ODP_ASSERT(q->qschst_type != ODP_SCHED_SYNC_ATOMIC);
/* Wait for our turn to update schedq. */
if (odp_unlikely(__atomic_load_n(&q->qschst.cur_ticket,
- __ATOMIC_ACQUIRE) != ticket)) {
- sevl();
- while (wfe() &&
- monitor8(&q->qschst.cur_ticket,
- __ATOMIC_ACQUIRE) != ticket)
- odp_cpu_pause();
- }
+ __ATOMIC_ACQUIRE) != ticket))
+ _odp_wait_until_eq_acq_u8(&q->qschst.cur_ticket, ticket);
+
/* We are here because of non-empty-to-empty transition or
* WRR budget exhausted
* This means the queue must be popped from the schedq, now or
@@ -494,12 +486,9 @@ static inline void sched_update_popd(sched_elem_t *elem)
1,
__ATOMIC_RELAXED);
if (odp_unlikely(__atomic_load_n(&elem->qschst.cur_ticket,
- __ATOMIC_ACQUIRE) != ticket)) {
- sevl();
- while (wfe() && monitor8(&elem->qschst.cur_ticket,
- __ATOMIC_ACQUIRE) != ticket)
- odp_cpu_pause();
- }
+ __ATOMIC_ACQUIRE) != ticket))
+ _odp_wait_until_eq_acq_u8(&elem->qschst.cur_ticket, ticket);
+
sched_update_popd_sc(elem);
atomic_store_release(&elem->qschst.cur_ticket, ticket + 1,
/*readonly=*/false);
@@ -1054,15 +1043,8 @@ restart_same:
continue;
}
/* Wait for our turn to dequeue */
- if (odp_unlikely(__atomic_load_n(&rwin->turn,
- __ATOMIC_ACQUIRE)
- != sn)) {
- sevl();
- while (wfe() &&
- monitor32(&rwin->turn, __ATOMIC_ACQUIRE)
- != sn)
- odp_cpu_pause();
- }
+ if (odp_unlikely(__atomic_load_n(&rwin->turn, __ATOMIC_ACQUIRE) != sn))
+ _odp_wait_until_eq_acq_u32(&rwin->turn, sn);
#ifdef CONFIG_QSCHST_LOCK
LOCK(&elem->qschlock);
#endif
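The 32-bit waits in this hunk and the ones that follow presumably use the same shape at word width; again a sketch inferred from the removed monitor32() loops, with the signature assumed:

    /* Assumed 32-bit counterpart of the helper sketched earlier. */
    static inline void _odp_wait_until_eq_acq_u32(uint32_t *addr, uint32_t expected)
    {
            sevl();
            while (wfe() && monitor32(addr, __ATOMIC_ACQUIRE) != expected)
                    odp_cpu_pause();
    }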
@@ -1143,13 +1125,8 @@ static void schedule_order_lock(uint32_t lock_index)
return;
}
if (odp_unlikely(__atomic_load_n(&rctx->rwin->olock[lock_index],
- __ATOMIC_ACQUIRE) != rctx->sn)) {
- sevl();
- while (wfe() &&
- monitor32(&rctx->rwin->olock[lock_index],
- __ATOMIC_ACQUIRE) != rctx->sn)
- odp_cpu_pause();
- }
+ __ATOMIC_ACQUIRE) != rctx->sn))
+ _odp_wait_until_eq_acq_u32(&rctx->rwin->olock[lock_index], rctx->sn);
}

static void schedule_order_unlock(uint32_t lock_index)
@@ -1555,12 +1532,7 @@ static int schedule_group_destroy(odp_schedule_group_t group)
if (sg->xcount[p] != 0) {
bitset_t wanted = atom_bitset_load(&sg->thr_wanted, __ATOMIC_RELAXED);

- sevl();
- while (wfe() &&
- !bitset_is_eql(wanted,
- bitset_monitor(&sg->thr_actual[p],
- __ATOMIC_RELAXED)))
- odp_cpu_pause();
+ _odp_wait_until_eq_bitset(&sg->thr_actual[p], wanted);
}
/* Else ignore because no ODP queues on this prio */
}
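The bitset wait differs slightly from the scalar ones: the removed loop takes a relaxed snapshot of thr_wanted once, then monitors thr_actual[p] until the two compare equal under bitset_is_eql(), with no acquire ordering. A sketch under the same assumptions as above:

    /* Assumed bitset variant; bitset_t, bitset_is_eql() and
     * bitset_monitor() are the primitives from the removed loop. */
    static inline void _odp_wait_until_eq_bitset(bitset_t *addr, bitset_t expected)
    {
            sevl();
            while (wfe() && !bitset_is_eql(expected,
                                           bitset_monitor(addr, __ATOMIC_RELAXED)))
                    odp_cpu_pause();
    }

Unlike the u8/u32 call sites, this one has no preceding fast-path load in the new code, so the helper is entered unconditionally once xcount[p] is non-zero.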
@@ -2127,13 +2099,10 @@ static void order_lock(void)
_ODP_ASSERT(ts->rctx != NULL);
rwin = ts->rctx->rwin;
sn = ts->rctx->sn;
- sevl();
/* Use acquire ordering to be on the safe side even if
* this isn't an acquire/release situation (aka lock).
*/
- while (wfe() &&
- monitor32(&rwin->hc.head, __ATOMIC_ACQUIRE) != sn)
- odp_cpu_pause();
+ _odp_wait_until_eq_acq_u32(&rwin->hc.head, sn);
}
}