/* Copyright (c) 2013, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp/ticketlock.h>
#include <odp/atomic.h>
#include <odp_atomic_internal.h>
#include <odp/sync.h>
#include <odp_spin_internal.h>
void odp_ticketlock_init(odp_ticketlock_t *ticketlock)
{
	/* Reset both counters so that the first ticket handed out (0)
	 * is immediately the one being served, i.e. the lock starts free. */
	odp_atomic_init_u32(&ticketlock->cur_ticket, 0);
	odp_atomic_init_u32(&ticketlock->next_ticket, 0);
}
void odp_ticketlock_lock(odp_ticketlock_t *ticketlock)
{
	/* Draw a ticket with a relaxed atomic increment. Acquire
	 * semantics are not needed here since the lock has not been
	 * taken yet at this point. */
	const uint32_t my_ticket =
		odp_atomic_fetch_inc_u32(&ticketlock->next_ticket);

	/* Busy-wait until 'cur_ticket' reaches our number. The
	 * load-acquire pairs with the store-release in unlock so that
	 * all stores from the previous owner become visible to us. */
	for (;;) {
		uint32_t now_serving =
			_odp_atomic_u32_load_mm(&ticketlock->cur_ticket,
						_ODP_MEMMODEL_ACQ);
		if (now_serving == my_ticket)
			break;
		odp_spin();
	}
}
int odp_ticketlock_trylock(odp_ticketlock_t *tklock)
{
	/* A non-atomic snapshot of the two counters is acceptable:
	 * 'cur_ticket' never exceeds 'next_ticket', so observing a stale
	 * 'cur_ticket' can only make the lock appear busy, causing a
	 * harmless trylock failure. */
	uint32_t next = odp_atomic_load_u32(&tklock->next_ticket);
	uint32_t cur = odp_atomic_load_u32(&tklock->cur_ticket);

	/* Lock is busy (would require spinning) - bail out early. */
	if (next != cur)
		return 0;

	/* Try to claim the next ticket, but only if 'next_ticket' still
	 * holds the value we sampled (equal to 'cur_ticket'). There is
	 * no need to re-check 'cur_ticket': it can only be smaller than
	 * 'next_ticket', never larger. A failed CAS means another thread
	 * intercepted and took a ticket, so the lock is no longer
	 * available. Acquire ordering on success, relaxed on failure. */
	return _odp_atomic_u32_cmp_xchg_strong_mm(&tklock->next_ticket,
						  &next,
						  next + 1,
						  _ODP_MEMMODEL_ACQ,
						  _ODP_MEMMODEL_RLX) ? 1 : 0;
}
void odp_ticketlock_unlock(odp_ticketlock_t *ticketlock)
{
	/* Only the current lock owner ever writes 'cur_ticket', so an
	 * (expensive) atomic RMW is unnecessary: a relaxed load followed
	 * by a store-release of the incremented value is enough. The
	 * release publishes all of our critical-section stores to the
	 * next owner's load-acquire in lock(). */
	uint32_t serving = _odp_atomic_u32_load_mm(&ticketlock->cur_ticket,
						   _ODP_MEMMODEL_RLX);

	_odp_atomic_u32_store_mm(&ticketlock->cur_ticket, serving + 1,
				 _ODP_MEMMODEL_RLS);

#if defined __OCTEON__
	odp_sync_stores(); /* SYNCW to flush write buffer */
#endif
}
int odp_ticketlock_is_locked(odp_ticketlock_t *ticketlock)
{
	/* The two loads are not performed as one atomic pair, so the
	 * result may be stale by the time it is returned. The function
	 * is only dependable in a quiescent system where such non-atomic
	 * snapshots pose no problem. */
	uint32_t in_service = odp_atomic_load_u32(&ticketlock->cur_ticket);
	uint32_t next_free = odp_atomic_load_u32(&ticketlock->next_ticket);

	return in_service != next_free;
}