diff options
author | Jerome Forissier <jerome@forissier.org> | 2020-07-09 19:46:15 +0200 |
---|---|---|
committer | Jérôme Forissier <jerome@forissier.org> | 2020-07-14 23:48:08 +0200 |
commit | 42fb53ca019e4396621909c1663e21b6001c007d (patch) | |
tree | 6707d36d74a6f3a1f33d5746ffa99206e10aae77 | |
parent | 64fd5a92cf37709d37566c7a5467e97d7b5a824c (diff) |
core: introduce recursive mutexes
Adds support for recursive mutex objects. A recursive mutex may be
locked several times by the same thread without causing a deadlock.
The implementation is copied from the get_pool()/put_pool() functions
in lib/libutils/ext/mempool.c; that file will be updated to use the new
mutex type in a later commit.
In order to avoid the overhead associated with recursive mutexes when
they are not needed, a new struct recursive_mutex is introduced, along
with dedicated functions: mutex_init_recursive(), mutex_destroy_recursive(),
mutex_lock_recursive() and mutex_unlock_recursive(). A static initializer
(RECURSIVE_MUTEX_INITIALIZER) is also available.
mutex_get_recursive_lock_depth() returns the current lock depth (valid
only when called from the thread holding the lock).
Signed-off-by: Jerome Forissier <jerome@forissier.org>
Reviewed-by: Etienne Carriere <etienne.carriere@linaro.org>
Reviewed-by: Jens Wiklander <jens.wiklander@linaro.org>
-rw-r--r-- | core/arch/arm/include/kernel/mutex.h | 31 | ||||
-rw-r--r-- | core/arch/arm/kernel/mutex.c | 78 |
2 files changed, 107 insertions, 2 deletions
diff --git a/core/arch/arm/include/kernel/mutex.h b/core/arch/arm/include/kernel/mutex.h index f1ca2a6c..e4c470b0 100644 --- a/core/arch/arm/include/kernel/mutex.h +++ b/core/arch/arm/include/kernel/mutex.h @@ -5,22 +5,37 @@ #ifndef KERNEL_MUTEX_H #define KERNEL_MUTEX_H -#include <types_ext.h> -#include <sys/queue.h> +#include <kernel/refcount.h> #include <kernel/wait_queue.h> +#include <sys/queue.h> +#include <types_ext.h> struct mutex { unsigned spin_lock; /* used when operating on this struct */ struct wait_queue wq; short state; /* -1: write, 0: unlocked, > 0: readers */ }; + #define MUTEX_INITIALIZER { .wq = WAIT_QUEUE_INITIALIZER } +struct recursive_mutex { + struct mutex m; /* used when lock_depth goes 0 -> 1 or 1 -> 0 */ + short int owner; + struct refcount lock_depth; +}; + +#define RECURSIVE_MUTEX_INITIALIZER { .m = MUTEX_INITIALIZER, \ + .owner = THREAD_ID_INVALID } + TAILQ_HEAD(mutex_head, mutex); void mutex_init(struct mutex *m); void mutex_destroy(struct mutex *m); +void mutex_init_recursive(struct recursive_mutex *m); +void mutex_destroy_recursive(struct recursive_mutex *m); +unsigned int mutex_get_recursive_lock_depth(struct recursive_mutex *m); + #ifdef CFG_MUTEX_DEBUG void mutex_unlock_debug(struct mutex *m, const char *fname, int lineno); #define mutex_unlock(m) mutex_unlock_debug((m), __FILE__, __LINE__) @@ -40,6 +55,15 @@ void mutex_read_lock_debug(struct mutex *m, const char *fname, int lineno); bool mutex_read_trylock_debug(struct mutex *m, const char *fname, int lineno); #define mutex_read_trylock(m) mutex_read_trylock_debug((m), __FILE__, __LINE__) +void mutex_unlock_recursive_debug(struct recursive_mutex *m, const char *fname, + int lineno); +#define mutex_unlock_recursive(m) mutex_unlock_recursive_debug((m), __FILE__, \ + __LINE__) + +void mutex_lock_recursive_debug(struct recursive_mutex *m, const char *fname, + int lineno); +#define mutex_lock_recursive(m) mutex_lock_recursive_debug((m), __FILE__, \ + __LINE__) #else void 
mutex_unlock(struct mutex *m); void mutex_lock(struct mutex *m); @@ -47,6 +71,9 @@ bool mutex_trylock(struct mutex *m); void mutex_read_unlock(struct mutex *m); void mutex_read_lock(struct mutex *m); bool mutex_read_trylock(struct mutex *m); + +void mutex_unlock_recursive(struct recursive_mutex *m); +void mutex_lock_recursive(struct recursive_mutex *m); #endif struct condvar { diff --git a/core/arch/arm/kernel/mutex.c b/core/arch/arm/kernel/mutex.c index c51b97c6..45028c43 100644 --- a/core/arch/arm/kernel/mutex.c +++ b/core/arch/arm/kernel/mutex.c @@ -5,6 +5,7 @@ #include <kernel/mutex.h> #include <kernel/panic.h> +#include <kernel/refcount.h> #include <kernel/spinlock.h> #include <kernel/thread.h> #include <trace.h> @@ -16,6 +17,11 @@ void mutex_init(struct mutex *m) *m = (struct mutex)MUTEX_INITIALIZER; } +void mutex_init_recursive(struct recursive_mutex *m) +{ + *m = (struct recursive_mutex)RECURSIVE_MUTEX_INITIALIZER; +} + static void __mutex_lock(struct mutex *m, const char *fname, int lineno) { assert_have_no_spinlock(); @@ -60,6 +66,27 @@ static void __mutex_lock(struct mutex *m, const char *fname, int lineno) } } +static void __mutex_lock_recursive(struct recursive_mutex *m, const char *fname, + int lineno) +{ + short int ct = thread_get_id(); + + assert_have_no_spinlock(); + assert(thread_is_in_normal_mode()); + + if (atomic_load_short(&m->owner) == ct) { + if (!refcount_inc(&m->lock_depth)) + panic(); + return; + } + + __mutex_lock(&m->m, fname, lineno); + + assert(m->owner == THREAD_ID_INVALID); + atomic_store_short(&m->owner, ct); + refcount_set(&m->lock_depth, 1); +} + static void __mutex_unlock(struct mutex *m, const char *fname, int lineno) { uint32_t old_itr_status; @@ -81,6 +108,22 @@ static void __mutex_unlock(struct mutex *m, const char *fname, int lineno) wq_wake_next(&m->wq, m, fname, lineno); } +static void __mutex_unlock_recursive(struct recursive_mutex *m, + const char *fname, int lineno) +{ + assert_have_no_spinlock(); + assert(m->owner == 
thread_get_id()); + + if (refcount_dec(&m->lock_depth)) { + /* + * Do an atomic store to match the atomic load in + * __mutex_lock_recursive() + */ + atomic_store_short(&m->owner, THREAD_ID_INVALID); + __mutex_unlock(&m->m, fname, lineno); + } +} + static bool __mutex_trylock(struct mutex *m, const char *fname __unused, int lineno __unused) { @@ -219,17 +262,39 @@ bool mutex_read_trylock_debug(struct mutex *m, const char *fname, int lineno) { return __mutex_read_trylock(m, fname, lineno); } + +void mutex_unlock_recursive_debug(struct recursive_mutex *m, const char *fname, + int lineno) +{ + __mutex_unlock_recursive(m, fname, lineno); +} + +void mutex_lock_recursive_debug(struct recursive_mutex *m, const char *fname, + int lineno) +{ + __mutex_lock_recursive(m, fname, lineno); +} #else void mutex_unlock(struct mutex *m) { __mutex_unlock(m, NULL, -1); } +void mutex_unlock_recursive(struct recursive_mutex *m) +{ + __mutex_unlock_recursive(m, NULL, -1); +} + void mutex_lock(struct mutex *m) { __mutex_lock(m, NULL, -1); } +void mutex_lock_recursive(struct recursive_mutex *m) +{ + __mutex_lock_recursive(m, NULL, -1); +} + bool mutex_trylock(struct mutex *m) { return __mutex_trylock(m, NULL, -1); @@ -264,6 +329,19 @@ void mutex_destroy(struct mutex *m) mutex_destroy_check(m); } +void mutex_destroy_recursive(struct recursive_mutex *m) +{ + mutex_destroy(&m->m); +} + +unsigned int mutex_get_recursive_lock_depth(struct recursive_mutex *m) +{ + assert_have_no_spinlock(); + assert(m->owner == thread_get_id()); + + return refcount_val(&m->lock_depth); +} + void condvar_init(struct condvar *cv) { *cv = (struct condvar)CONDVAR_INITIALIZER; |