/*
| 2 | * Simple waitqueues without fancy flags and callbacks |
| 3 | * |
| 4 | * (C) 2011 Thomas Gleixner <tglx@linutronix.de> |
| 5 | * |
| 6 | * Based on kernel/wait.c |
| 7 | * |
 * For licensing details see kernel-base/COPYING
| 9 | */ |
| 10 | #include <linux/init.h> |
| 11 | #include <linux/export.h> |
| 12 | #include <linux/sched.h> |
| 13 | #include <linux/wait-simple.h> |
| 14 | |
| 15 | /* Adds w to head->list. Must be called with head->lock locked. */ |
| 16 | static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) |
| 17 | { |
| 18 | list_add(&w->node, &head->list); |
| 19 | } |
| 20 | |
| 21 | /* Removes w from head->list. Must be called with head->lock locked. */ |
| 22 | static inline void __swait_dequeue(struct swaiter *w) |
| 23 | { |
| 24 | list_del_init(&w->node); |
| 25 | } |
| 26 | |
| 27 | /* Check whether a head has waiters enqueued */ |
| 28 | static inline bool swait_head_has_waiters(struct swait_head *h) |
| 29 | { |
| 30 | return !list_empty(&h->list); |
| 31 | } |
| 32 | |
| 33 | void __init_swait_head(struct swait_head *head, struct lock_class_key *key) |
| 34 | { |
| 35 | raw_spin_lock_init(&head->lock); |
| 36 | lockdep_set_class(&head->lock, key); |
| 37 | INIT_LIST_HEAD(&head->list); |
| 38 | } |
| 39 | EXPORT_SYMBOL_GPL(__init_swait_head); |
| 40 | |
| 41 | void swait_prepare_locked(struct swait_head *head, struct swaiter *w) |
| 42 | { |
| 43 | w->task = current; |
| 44 | if (list_empty(&w->node)) |
| 45 | __swait_enqueue(head, w); |
| 46 | } |
| 47 | |
/*
 * swait_prepare - enqueue @w on @head and move current into @state.
 * @head:  waitqueue head
 * @w:     this task's waiter
 * @state: task state to sleep in (e.g. TASK_UNINTERRUPTIBLE)
 *
 * The state change is made while holding head->lock: wakers run
 * __swait_wake_locked() under the same lock, so once a waker can see
 * the waiter enqueued, it is guaranteed to also see the task in its
 * sleep state and wake_up_state() will match it.
 */
void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&head->lock, flags);
	swait_prepare_locked(head, w);
	__set_current_state(state);
	raw_spin_unlock_irqrestore(&head->lock, flags);
}
EXPORT_SYMBOL_GPL(swait_prepare);
| 58 | |
/*
 * swait_finish_locked - stop waiting; caller holds head->lock.
 *
 * Restores TASK_RUNNING and dequeues @w unless a waker already did:
 * __swait_wake_locked() clears w->task after dequeueing, so a NULL
 * w->task means the waiter is no longer on the list.
 */
void swait_finish_locked(struct swait_head *head, struct swaiter *w)
{
	__set_current_state(TASK_RUNNING);
	if (w->task)
		__swait_dequeue(w);
}
| 65 | |
/*
 * swait_finish - stop waiting, taking head->lock only if still queued.
 *
 * Fast path: if a waker already dequeued @w it also cleared w->task
 * (ordered by the smp_wmb() in __swait_wake_locked(), which ensures
 * the dequeue is visible before the NULL store), so a NULL w->task
 * lets us return without touching the lock.  Otherwise take the lock
 * and dequeue ourselves.
 */
void swait_finish(struct swait_head *head, struct swaiter *w)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	if (w->task) {
		raw_spin_lock_irqsave(&head->lock, flags);
		__swait_dequeue(w);
		raw_spin_unlock_irqrestore(&head->lock, flags);
	}
}
EXPORT_SYMBOL_GPL(swait_finish);
| 78 | |
/*
 * __swait_wake_locked - wake up to @num waiters in @state on @head.
 * Caller must hold head->lock.
 *
 * @num == 0 means "wake all": woken starts positive after the first
 * wake, so ++woken can never equal 0 and the loop visits every waiter.
 * Waiters whose task is not in @state (wake_up_state() returns 0) are
 * skipped and left on the list.
 *
 * Returns the number of tasks actually woken.
 */
unsigned int
__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num)
{
	struct swaiter *curr, *next;
	int woken = 0;

	/* _safe variant: we dequeue curr while iterating. */
	list_for_each_entry_safe(curr, next, &head->list, node) {
		if (wake_up_state(curr->task, state)) {
			__swait_dequeue(curr);
			/*
			 * The waiting task can free the waiter as
			 * soon as curr->task = NULL is written,
			 * without taking any locks. A memory barrier
			 * is required here to prevent the following
			 * store to curr->task from getting ahead of
			 * the dequeue operation.
			 */
			smp_wmb();
			curr->task = NULL;
			if (++woken == num)
				break;
		}
	}
	return woken;
}
| 104 | |
/*
 * __swait_wake - wake up to @num waiters in @state on @head.
 *
 * Fast path: the swait_head_has_waiters() check is done without the
 * lock so an empty queue costs no atomic operations.
 * NOTE(review): a waiter enqueueing concurrently with this unlocked
 * check may be missed on this call — presumably callers' own
 * condition-recheck protocol makes that benign; confirm against the
 * users of wait-simple.h.
 *
 * Returns the number of tasks woken (0 if the list looked empty).
 */
unsigned int
__swait_wake(struct swait_head *head, unsigned int state, unsigned int num)
{
	unsigned long flags;
	int woken;

	if (!swait_head_has_waiters(head))
		return 0;

	raw_spin_lock_irqsave(&head->lock, flags);
	woken = __swait_wake_locked(head, state, num);
	raw_spin_unlock_irqrestore(&head->lock, flags);
	return woken;
}
EXPORT_SYMBOL_GPL(__swait_wake);