From 2055da97389a605c8a00d163d40903afbe413921 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 20 Jun 2017 12:06:46 +0200
Subject: sched/wait: Disambiguate wq_entry->task_list and wq_head->task_list naming

So I've noticed a number of instances where it was not obvious from the
code whether ->task_list was for a wait-queue head or a wait-queue entry.

Furthermore, there's a number of wait-queue users where the lists are
not for 'tasks' but other entities (poll tables, etc.), in which case
the 'task_list' name is actively confusing.

To clear this all up, name the wait-queue head and entry list structure
fields unambiguously:

	struct wait_queue_head::task_list	=> ::head
	struct wait_queue_entry::task_list	=> ::entry

For example, this code:

	rqw->wait.task_list.next != &wait->task_list

... was pretty unclear (to me) about what it's doing, while now it's
written this way:

	rqw->wait.head.next != &wait->entry

... which makes it pretty clear that we are iterating a list until we
see the head.

Other examples are:

	list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
	list_for_each_entry(wq, &fence->wait.task_list, task_list) {

... where it's unclear (to me) what we are iterating, and during review
it's hard to tell whether it's trying to walk a wait-queue entry (which
would be a bug), while now it's written as:

	list_for_each_entry_safe(pos, next, &x->head, entry) {
	list_for_each_entry(wq, &fence->wait.head, entry) {

Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/wait.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

(limited to 'kernel/sched/wait.c')

diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 6bcd7c3c4501..17f11c6b0a9f 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -16,7 +16,7 @@ void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, st
 {
 	spin_lock_init(&wq_head->lock);
 	lockdep_set_class_and_name(&wq_head->lock, key, name);
-	INIT_LIST_HEAD(&wq_head->task_list);
+	INIT_LIST_HEAD(&wq_head->head);
 }
 
 EXPORT_SYMBOL(__init_waitqueue_head);
@@ -68,7 +68,7 @@ static void __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
 {
 	wait_queue_entry_t *curr, *next;
 
-	list_for_each_entry_safe(curr, next, &wq_head->task_list, task_list) {
+	list_for_each_entry_safe(curr, next, &wq_head->head, entry) {
 		unsigned flags = curr->flags;
 
 		if (curr->func(curr, mode, wake_flags, key) &&
@@ -176,7 +176,7 @@ prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_ent
 
 	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&wq_head->lock, flags);
-	if (list_empty(&wq_entry->task_list))
+	if (list_empty(&wq_entry->entry))
 		__add_wait_queue(wq_head, wq_entry);
 	set_current_state(state);
 	spin_unlock_irqrestore(&wq_head->lock, flags);
@@ -190,7 +190,7 @@ prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_ent
 
 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&wq_head->lock, flags);
-	if (list_empty(&wq_entry->task_list))
+	if (list_empty(&wq_entry->entry))
 		__add_wait_queue_entry_tail(wq_head, wq_entry);
 	set_current_state(state);
 	spin_unlock_irqrestore(&wq_head->lock, flags);
@@ -202,7 +202,7 @@ void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
 	wq_entry->flags = flags;
 	wq_entry->private = current;
 	wq_entry->func = autoremove_wake_function;
-	INIT_LIST_HEAD(&wq_entry->task_list);
+	INIT_LIST_HEAD(&wq_entry->entry);
 }
 EXPORT_SYMBOL(init_wait_entry);
 
@@ -225,10 +225,10 @@ long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_en
 		 * can't see us, it should wake up another exclusive waiter if
 		 * we fail.
 		 */
-		list_del_init(&wq_entry->task_list);
+		list_del_init(&wq_entry->entry);
 		ret = -ERESTARTSYS;
 	} else {
-		if (list_empty(&wq_entry->task_list)) {
+		if (list_empty(&wq_entry->entry)) {
 			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
 				__add_wait_queue_entry_tail(wq_head, wq_entry);
 			else
@@ -251,7 +251,7 @@ EXPORT_SYMBOL(prepare_to_wait_event);
  */
 int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
 {
-	if (likely(list_empty(&wait->task_list)))
+	if (likely(list_empty(&wait->entry)))
 		__add_wait_queue_entry_tail(wq, wait);
 
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -267,7 +267,7 @@ EXPORT_SYMBOL(do_wait_intr);
 
 int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
 {
-	if (likely(list_empty(&wait->task_list)))
+	if (likely(list_empty(&wait->entry)))
 		__add_wait_queue_entry_tail(wq, wait);
 
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -308,9 +308,9 @@ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_en
 	 * have _one_ other CPU that looks at or modifies
 	 * the list).
 	 */
-	if (!list_empty_careful(&wq_entry->task_list)) {
+	if (!list_empty_careful(&wq_entry->entry)) {
 		spin_lock_irqsave(&wq_head->lock, flags);
-		list_del_init(&wq_entry->task_list);
+		list_del_init(&wq_entry->entry);
 		spin_unlock_irqrestore(&wq_head->lock, flags);
 	}
 }
@@ -321,7 +321,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
 	int ret = default_wake_function(wq_entry, mode, sync, key);
 
 	if (ret)
-		list_del_init(&wq_entry->task_list);
+		list_del_init(&wq_entry->entry);
 	return ret;
 }
 EXPORT_SYMBOL(autoremove_wake_function);
--
cgit v1.2.3
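
For readers unfamiliar with the structures being renamed, here is a minimal
sketch (not part of the patch above) of the post-rename field layout and the
iteration pattern the commit message describes. The struct layouts are
paraphrased from include/linux/wait.h of this period, and walk_waiters() is
a hypothetical helper added purely for illustration:

	/* Sketch only: paraphrased layouts, not the authoritative header. */
	struct wait_queue_head {
		spinlock_t		lock;
		struct list_head	head;	/* was ->task_list */
	};

	struct wait_queue_entry {
		unsigned int		flags;
		void			*private;
		wait_queue_func_t	func;
		struct list_head	entry;	/* was ->task_list */
	};

	/* Hypothetical helper: walk the waiters; caller must hold wq_head->lock. */
	static void walk_waiters(struct wait_queue_head *wq_head)
	{
		struct wait_queue_entry *curr;

		list_for_each_entry(curr, &wq_head->head, entry)
			pr_info("waiter flags=0x%x\n", curr->flags);
	}

With the rename, iterating &wq_head->head by the ->entry member reads
unambiguously as "walk the head, visiting each entry", which is the point
the commit message makes with its __wake_up_common() example.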