// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asynchronously some time after
 * the event happened. When inotify gets an event it will need to add that
 * event to the group notify queue. Since a single event might need to be on
 * multiple groups' notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue. This event_holder
 * has a pointer back to the original event. Since the majority of events are
 * going to end up on one, and only one, notification queue we embed one
 * event_holder into each event. This means we have a single allocation instead
 * of always needing two. If the embedded event_holder is already in use by
 * another group a new event_holder (from fsnotify_event_holder_cachep) will be
 * allocated and used.
 */
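
/*
 * A minimal sketch of the embedded-holder idea (the field layout below is
 * illustrative, simplified from struct fsnotify_event in
 * include/linux/fsnotify_backend.h, not the exact definition):
 *
 *	struct fsnotify_event {
 *		struct list_head list;	// the embedded "holder"
 *		// backend-specific payload (inotify, fanotify, ...) follows
 *	};
 *
 * Queuing the event on its single notification list is then just
 * list_add_tail(&event->list, &group->notification_list), with no extra
 * allocation in the common case.
 */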

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);

/**
 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
 * Called from fsnotify_move, which is inlined into filesystem modules.
 */
u32 fsnotify_get_cookie(void)
{
	return atomic_inc_return(&fsnotify_sync_cookie);
}
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
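
/*
 * Illustrative pairing (a simplified sketch of what fsnotify_move() in
 * include/linux/fsnotify.h does; the real helper names and argument lists
 * vary across kernel versions, and report_event() below is a hypothetical
 * stand-in): one cookie tags both halves of a rename so userspace can match
 * the FS_MOVED_FROM event to its FS_MOVED_TO counterpart.
 *
 *	u32 cookie = fsnotify_get_cookie();
 *
 *	report_event(old_dir, FS_MOVED_FROM, old_name, cookie);	// hypothetical
 *	report_event(new_dir, FS_MOVED_TO, new_name, cookie);	// hypothetical
 */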

/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);
	return list_empty(&group->notification_list);
}

void fsnotify_destroy_event(struct fsnotify_group *group,
			    struct fsnotify_event *event)
{
	/* Overflow events are per-group and we don't want to free them */
	if (!event || event == group->overflow_event)
		return;
	/*
	 * If the event is still queued, we have a problem... Do an unreliable
	 * lockless check first to avoid locking in the common case. The
	 * locking may be necessary for permission events which got removed
	 * from the list by a different CPU than the one freeing the event.
	 */
	if (!list_empty(&event->list)) {
		spin_lock(&group->notification_lock);
		WARN_ON(!list_empty(&event->list));
		spin_unlock(&group->notification_lock);
	}
	group->ops->free_event(event);
}

/*
 * Add an event to the group notification queue. The group can later pull this
 * event off the queue to deal with. The function returns 0 if the event was
 * added to the queue, 1 if the event was merged with some other queued event,
 * 2 if the event was not queued - either the queue of events has overflowed
 * or the group is shutting down.
 */
int fsnotify_add_event(struct fsnotify_group *group,
		       struct fsnotify_event *event,
		       int (*merge)(struct list_head *,
				    struct fsnotify_event *))
{
	int ret = 0;
	struct list_head *list = &group->notification_list;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	spin_lock(&group->notification_lock);

	if (group->shutdown) {
		spin_unlock(&group->notification_lock);
		return 2;
	}

	if (event == group->overflow_event ||
	    group->q_len >= group->max_events) {
		ret = 2;
		/* Queue overflow event only if it isn't already queued */
		if (!list_empty(&group->overflow_event->list)) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
		event = group->overflow_event;
		goto queue;
	}

	if (!list_empty(list) && merge) {
		ret = merge(list, event);
		if (ret) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
	}

queue:
	group->q_len++;
	list_add_tail(&event->list, list);
	spin_unlock(&group->notification_lock);

	wake_up(&group->notification_waitq);
	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
	return ret;
}
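
/*
 * A sketch of a merge callback, modeled loosely on inotify's inotify_merge():
 * compare the new event against the tail of the queue and return nonzero if
 * it duplicates it, so the caller reports "merged" instead of queueing a
 * second copy. event_is_dup() is a hypothetical comparison helper.
 *
 *	static int example_merge(struct list_head *list,
 *				 struct fsnotify_event *event)
 *	{
 *		struct fsnotify_event *last;
 *
 *		last = list_entry(list->prev, struct fsnotify_event, list);
 *		return event_is_dup(last, event) ? 1 : 0;	// hypothetical
 *	}
 *
 *	ret = fsnotify_add_event(group, event, example_merge);
 */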

void fsnotify_remove_queued_event(struct fsnotify_group *group,
				  struct fsnotify_event *event)
{
	assert_spin_locked(&group->notification_lock);
	/*
	 * We need to re-init the list head for the overflow event case so
	 * that the emptiness check in fsnotify_add_event() works.
	 */
	list_del_init(&event->list);
	group->q_len--;
}

/*
 * Remove and return the first event from the notification list. It is the
 * responsibility of the caller to destroy the obtained event.
 */
struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p\n", __func__, group);

	event = list_first_entry(&group->notification_list,
				 struct fsnotify_event, list);
	fsnotify_remove_queued_event(group, event);
	return event;
}

/*
 * This will not remove the event; that must be done with
 * fsnotify_remove_first_event().
 */
struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);

	return list_first_entry(&group->notification_list,
				struct fsnotify_event, list);
}
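
/*
 * Typical reader-side pattern (a hedged sketch of how a backend might copy
 * events to a user buffer; event_fits() is a hypothetical size check): peek
 * under the lock and only dequeue once the event is known to be consumable.
 *
 *	spin_lock(&group->notification_lock);
 *	if (fsnotify_notify_queue_is_empty(group)) {
 *		spin_unlock(&group->notification_lock);
 *		return NULL;
 *	}
 *	event = fsnotify_peek_first_event(group);
 *	if (event_fits(event, count))		// hypothetical
 *		fsnotify_remove_queued_event(group, event);
 *	else
 *		event = NULL;
 *	spin_unlock(&group->notification_lock);
 */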

/*
 * Called when a group is being torn down to clean up any outstanding
 * event notifications.
 */
void fsnotify_flush_notify(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	spin_lock(&group->notification_lock);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_first_event(group);
		spin_unlock(&group->notification_lock);
		fsnotify_destroy_event(group, event);
		spin_lock(&group->notification_lock);
	}
	spin_unlock(&group->notification_lock);
}