aboutsummaryrefslogtreecommitdiff
path: root/platform
diff options
context:
space:
mode:
authorOla Liljedahl <ola.liljedahl@linaro.org>2014-11-24 23:38:42 +0100
committerMaxim Uvarov <maxim.uvarov@linaro.org>2014-11-25 18:23:51 +0300
commit73be873ca33443e5fb49c7fbe982603a1682bb08 (patch)
tree1967ef42828b48ea0a4c680d86356f0f212641f2 /platform
parentaa0cd5993b5452275bd1398cadf57a2b1e206cf3 (diff)
linux-generic: odp_ring.c use __atomic
Signed-off-by: Ola Liljedahl <ola.liljedahl@linaro.org> Reviewed-by: Petri Savolainen <petri.savolainen@linaro.org> Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
Diffstat (limited to 'platform')
-rw-r--r--platform/linux-generic/odp_ring.c28
1 file changed, 20 insertions, 8 deletions
diff --git a/platform/linux-generic/odp_ring.c b/platform/linux-generic/odp_ring.c
index 632aa6606..1d3130a61 100644
--- a/platform/linux-generic/odp_ring.c
+++ b/platform/linux-generic/odp_ring.c
@@ -259,13 +259,16 @@ int __odph_ring_mp_do_enqueue(odph_ring_t *r, void * const *obj_table,
}
prod_next = prod_head + n;
- success = odp_atomic_cmpset_u32(&r->prod.head, prod_head,
- prod_next);
+ success = __atomic_compare_exchange_n(&r->prod.head,
+ &prod_head,
+ prod_next,
+ false/*strong*/,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED);
} while (odp_unlikely(success == 0));
/* write entries in ring */
ENQUEUE_PTRS();
- odp_mem_barrier();
/* if we exceed the watermark */
if (odp_unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
@@ -282,6 +285,8 @@ int __odph_ring_mp_do_enqueue(odph_ring_t *r, void * const *obj_table,
while (odp_unlikely(r->prod.tail != prod_head))
odp_spin();
+ /* Release our entries and the memory they refer to */
+ __atomic_thread_fence(__ATOMIC_RELEASE);
r->prod.tail = prod_next;
return ret;
}
@@ -324,7 +329,6 @@ int __odph_ring_sp_do_enqueue(odph_ring_t *r, void * const *obj_table,
/* write entries in ring */
ENQUEUE_PTRS();
- odp_mem_barrier();
/* if we exceed the watermark */
if (odp_unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
@@ -334,6 +338,8 @@ int __odph_ring_sp_do_enqueue(odph_ring_t *r, void * const *obj_table,
ret = (behavior == ODPH_RING_QUEUE_FIXED) ? 0 : n;
}
+ /* Release our entries and the memory they refer to */
+ __atomic_thread_fence(__ATOMIC_RELEASE);
r->prod.tail = prod_next;
return ret;
}
@@ -378,13 +384,16 @@ int __odph_ring_mc_do_dequeue(odph_ring_t *r, void **obj_table,
}
cons_next = cons_head + n;
- success = odp_atomic_cmpset_u32(&r->cons.head, cons_head,
- cons_next);
+ success = __atomic_compare_exchange_n(&r->cons.head,
+ &cons_head,
+ cons_next,
+ false/*strong*/,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED);
} while (odp_unlikely(success == 0));
/* copy in table */
DEQUEUE_PTRS();
- odp_mem_barrier();
/*
* If there are other dequeues in progress that preceded us,
@@ -393,6 +402,8 @@ int __odph_ring_mc_do_dequeue(odph_ring_t *r, void **obj_table,
while (odp_unlikely(r->cons.tail != cons_head))
odp_spin();
+ /* Release our entries and the memory they refer to */
+ __atomic_thread_fence(__ATOMIC_RELEASE);
r->cons.tail = cons_next;
return behavior == ODPH_RING_QUEUE_FIXED ? 0 : n;
@@ -431,9 +442,10 @@ int __odph_ring_sc_do_dequeue(odph_ring_t *r, void **obj_table,
cons_next = cons_head + n;
r->cons.head = cons_next;
+ /* Acquire the pointers and the memory they refer to */
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
/* copy in table */
DEQUEUE_PTRS();
- odp_mem_barrier();
r->cons.tail = cons_next;
return behavior == ODPH_RING_QUEUE_FIXED ? 0 : n;