blob: eacd2634a236260023dfd7fa542f0201ff996692 [file] [log] [blame]
/*
 * fs/timerfd.c
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 *
 *
 * Thanks to Thomas Gleixner for code reviews and useful comments.
 *
 */
10
11#include <linux/file.h>
12#include <linux/poll.h>
13#include <linux/init.h>
14#include <linux/fs.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/list.h>
19#include <linux/spinlock.h>
20#include <linux/time.h>
21#include <linux/hrtimer.h>
22#include <linux/anon_inodes.h>
23#include <linux/timerfd.h>
24#include <linux/syscalls.h>
25#include <linux/rcupdate.h>
26
/*
 * Per-descriptor state. ctx->wqh.lock guards ticks/expired/moffs;
 * cancel_lock guards might_cancel and membership on cancel_list.
 */
struct timerfd_ctx {
	struct hrtimer tmr;		/* the underlying timer */
	ktime_t tintv;			/* interval; tv64 != 0 means periodic */
	ktime_t moffs;			/* monotonic offset at arm time; set to
					 * KTIME_MAX by timerfd_clock_was_set()
					 * to flag cancelation */
	wait_queue_head_t wqh;		/* read()/poll() waiters */
	u64 ticks;			/* expirations since last read() */
	int expired;			/* fired since last (re)arm */
	int clockid;			/* CLOCK_MONOTONIC or CLOCK_REALTIME */
	struct rcu_head rcu;		/* for kfree_rcu() in release */
	struct list_head clist;		/* link on cancel_list (RCU) */
	spinlock_t cancel_lock;		/* serializes cancel setup/teardown */
	bool might_cancel;		/* TFD_TIMER_CANCEL_ON_SET armed */
};
40
/* Contexts armed with TFD_TIMER_CANCEL_ON_SET, walked on clock changes. */
static LIST_HEAD(cancel_list);
/* Protects additions to / removals from cancel_list. */
static DEFINE_SPINLOCK(cancel_lock);
43
44/*
45 * This gets called when the timer event triggers. We set the "expired"
46 * flag, but we do not re-arm the timer (in case it's necessary,
47 * tintv.tv64 != 0) until the timer is accessed.
48 */
49static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
50{
51 struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, tmr);
52 unsigned long flags;
53
54 spin_lock_irqsave(&ctx->wqh.lock, flags);
55 ctx->expired = 1;
56 ctx->ticks++;
57 wake_up_locked(&ctx->wqh);
58 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
59
60 return HRTIMER_NORESTART;
61}
62
/*
 * Called when the clock was set to cancel the timers in the cancel
 * list. This will wake up processes waiting on these timers. The
 * wake-up requires ctx->ticks to be non zero, therefore we increment
 * it before calling wake_up_locked().
 */
void timerfd_clock_was_set(void)
{
	ktime_t moffs = ktime_get_monotonic_offset();
	struct timerfd_ctx *ctx;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(ctx, &cancel_list, clist) {
		if (!ctx->might_cancel)
			continue;
		spin_lock_irqsave(&ctx->wqh.lock, flags);
		/*
		 * A changed monotonic offset means CLOCK_REALTIME moved.
		 * KTIME_MAX marks the context canceled; timerfd_canceled()
		 * later detects this and resamples the offset.
		 */
		if (ctx->moffs.tv64 != moffs.tv64) {
			ctx->moffs.tv64 = KTIME_MAX;
			ctx->ticks++;
			wake_up_locked(&ctx->wqh);
		}
		spin_unlock_irqrestore(&ctx->wqh.lock, flags);
	}
	rcu_read_unlock();
}
89
90static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
91{
92 if (ctx->might_cancel) {
93 ctx->might_cancel = false;
94 spin_lock(&cancel_lock);
95 list_del_rcu(&ctx->clist);
96 spin_unlock(&cancel_lock);
97 }
98}
/* Locked wrapper: detach @ctx from the cancel machinery. */
static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
{
	spin_lock(&ctx->cancel_lock);
	__timerfd_remove_cancel(ctx);
	spin_unlock(&ctx->cancel_lock);
}
105
106static bool timerfd_canceled(struct timerfd_ctx *ctx)
107{
108 if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
109 return false;
110 ctx->moffs = ktime_get_monotonic_offset();
111 return true;
112}
113
114static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
115{
116 spin_lock(&ctx->cancel_lock);
117 if (ctx->clockid == CLOCK_REALTIME && (flags & TFD_TIMER_ABSTIME) &&
118 (flags & TFD_TIMER_CANCEL_ON_SET)) {
119 if (!ctx->might_cancel) {
120 ctx->might_cancel = true;
121 spin_lock(&cancel_lock);
122 list_add_rcu(&ctx->clist, &cancel_list);
123 spin_unlock(&cancel_lock);
124 }
125 } else {
126 __timerfd_remove_cancel(ctx);
127 }
128 spin_unlock(&ctx->cancel_lock);
129}
130
131static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
132{
133 ktime_t remaining;
134
135 remaining = hrtimer_expires_remaining(&ctx->tmr);
136 return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
137}
138
/*
 * Program ctx->tmr from @ktmr/@flags and start it when the expiry time
 * is non-zero. Returns 0, or -ECANCELED if a clock change canceled the
 * timer right after it was started. Called with ctx->wqh.lock held
 * (see timerfd_settime()).
 */
static int timerfd_setup(struct timerfd_ctx *ctx, int flags,
			 const struct itimerspec *ktmr)
{
	enum hrtimer_mode htmode;
	ktime_t texp;
	int clockid = ctx->clockid;

	htmode = (flags & TFD_TIMER_ABSTIME) ?
		HRTIMER_MODE_ABS: HRTIMER_MODE_REL;

	texp = timespec_to_ktime(ktmr->it_value);
	/* Reset per-arm state; a zero it_value disarms the timer. */
	ctx->expired = 0;
	ctx->ticks = 0;
	ctx->tintv = timespec_to_ktime(ktmr->it_interval);
	hrtimer_init(&ctx->tmr, clockid, htmode);
	hrtimer_set_expires(&ctx->tmr, texp);
	ctx->tmr.function = timerfd_tmrproc;
	if (texp.tv64 != 0) {
		hrtimer_start(&ctx->tmr, texp, htmode);
		/* The clock may have been set while we were arming. */
		if (timerfd_canceled(ctx))
			return -ECANCELED;
	}
	return 0;
}
163
/*
 * ->release(): detach from the cancel list, stop the timer, then free
 * the context. kfree_rcu() defers the free so concurrent RCU walkers
 * in timerfd_clock_was_set() never touch freed memory.
 */
static int timerfd_release(struct inode *inode, struct file *file)
{
	struct timerfd_ctx *ctx = file->private_data;

	timerfd_remove_cancel(ctx);
	hrtimer_cancel(&ctx->tmr);
	kfree_rcu(ctx, rcu);
	return 0;
}
173
174static unsigned int timerfd_poll(struct file *file, poll_table *wait)
175{
176 struct timerfd_ctx *ctx = file->private_data;
177 unsigned int events = 0;
178 unsigned long flags;
179
180 poll_wait(file, &ctx->wqh, wait);
181
182 spin_lock_irqsave(&ctx->wqh.lock, flags);
183 if (ctx->ticks)
184 events |= POLLIN;
185 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
186
187 return events;
188}
189
/*
 * ->read(): wait (unless O_NONBLOCK) for at least one expiration, then
 * return the expiration count as a u64 and reset it. Returns -EINVAL
 * for a buffer smaller than u64, -EAGAIN when non-blocking with no
 * ticks, -ECANCELED when a clock change canceled the timer.
 */
static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct timerfd_ctx *ctx = file->private_data;
	ssize_t res;
	u64 ticks = 0;

	if (count < sizeof(ticks))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (file->f_flags & O_NONBLOCK)
		res = -EAGAIN;
	else
		res = wait_event_interruptible_locked_irq(ctx->wqh, ctx->ticks);

	/*
	 * If clock has changed, we do not care about the
	 * ticks and we do not rearm the timer. Userspace must
	 * reevaluate anyway.
	 */
	if (timerfd_canceled(ctx)) {
		ctx->ticks = 0;
		ctx->expired = 0;
		res = -ECANCELED;
	}

	if (ctx->ticks) {
		ticks = ctx->ticks;

		if (ctx->expired && ctx->tintv.tv64) {
			/*
			 * If tintv.tv64 != 0, this is a periodic timer that
			 * needs to be re-armed. We avoid doing it in the timer
			 * callback to avoid DoS attacks specifying a very
			 * short timer period.
			 */
			ticks += hrtimer_forward_now(&ctx->tmr,
						     ctx->tintv) - 1;
			hrtimer_restart(&ctx->tmr);
		}
		ctx->expired = 0;
		ctx->ticks = 0;
	}
	spin_unlock_irq(&ctx->wqh.lock);
	/* Copy to userspace only after dropping the lock; it may fault. */
	if (ticks)
		res = put_user(ticks, (u64 __user *) buf) ? -EFAULT: sizeof(ticks);
	return res;
}
238
/* File operations for the anonymous timerfd file (no write, no mmap). */
static const struct file_operations timerfd_fops = {
	.release	= timerfd_release,
	.poll		= timerfd_poll,
	.read		= timerfd_read,
	.llseek		= noop_llseek,
};
245
246static struct file *timerfd_fget(int fd)
247{
248 struct file *file;
249
250 file = fget(fd);
251 if (!file)
252 return ERR_PTR(-EBADF);
253 if (file->f_op != &timerfd_fops) {
254 fput(file);
255 return ERR_PTR(-EINVAL);
256 }
257
258 return file;
259}
260
/*
 * timerfd_create(2): allocate a timer context bound to @clockid
 * (CLOCK_MONOTONIC or CLOCK_REALTIME) and return a new file descriptor
 * for it, honoring TFD_CLOEXEC/TFD_NONBLOCK in @flags.
 */
SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
{
	int ufd;
	struct timerfd_ctx *ctx;

	/* Check the TFD_* constants for consistency. */
	BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(TFD_NONBLOCK != O_NONBLOCK);

	if ((flags & ~TFD_CREATE_FLAGS) ||
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME))
		return -EINVAL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	init_waitqueue_head(&ctx->wqh);
	spin_lock_init(&ctx->cancel_lock);
	ctx->clockid = clockid;
	hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
	/* Baseline offset used to detect later CLOCK_REALTIME changes. */
	ctx->moffs = ktime_get_monotonic_offset();

	ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
			       O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
	if (ufd < 0)
		kfree(ctx);	/* fd install failed: ctx never published */

	return ufd;
}
292
/*
 * timerfd_settime(2): (re)arm the timer behind @ufd with the values in
 * @utmr; when @otmr is non-NULL, store the previous setting there.
 * Returns 0, a -errno for bad input/fd, or -ECANCELED propagated from
 * timerfd_setup().
 */
SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
		const struct itimerspec __user *, utmr,
		struct itimerspec __user *, otmr)
{
	struct file *file;
	struct timerfd_ctx *ctx;
	struct itimerspec ktmr, kotmr;
	int ret;

	if (copy_from_user(&ktmr, utmr, sizeof(ktmr)))
		return -EFAULT;

	if ((flags & ~TFD_SETTIME_FLAGS) ||
	    !timespec_valid(&ktmr.it_value) ||
	    !timespec_valid(&ktmr.it_interval))
		return -EINVAL;

	file = timerfd_fget(ufd);
	if (IS_ERR(file))
		return PTR_ERR(file);
	ctx = file->private_data;

	timerfd_setup_cancel(ctx, flags);

	/*
	 * We need to stop the existing timer before reprogramming
	 * it to the new values.
	 */
	for (;;) {
		spin_lock_irq(&ctx->wqh.lock);
		if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
			break;
		/* Callback in progress: drop the lock and wait it out. */
		spin_unlock_irq(&ctx->wqh.lock);
		hrtimer_wait_for_timer(&ctx->tmr);
	}

	/*
	 * If the timer is expired and it's periodic, we need to advance it
	 * because the caller may want to know the previous expiration time.
	 * We do not update "ticks" and "expired" since the timer will be
	 * re-programmed again in the following timerfd_setup() call.
	 */
	if (ctx->expired && ctx->tintv.tv64)
		hrtimer_forward_now(&ctx->tmr, ctx->tintv);

	/* Snapshot the old setting before reprogramming. */
	kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
	kotmr.it_interval = ktime_to_timespec(ctx->tintv);

	/*
	 * Re-program the timer to the new value ...
	 */
	ret = timerfd_setup(ctx, flags, &ktmr);

	spin_unlock_irq(&ctx->wqh.lock);
	fput(file);
	if (otmr && copy_to_user(otmr, &kotmr, sizeof(kotmr)))
		return -EFAULT;

	return ret;
}
353
/*
 * timerfd_gettime(2): report the remaining time and interval of the
 * timer behind @ufd into @otmr. A periodic timer that has expired is
 * re-armed here (same deferred re-arm strategy as timerfd_read()).
 */
SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
{
	struct file *file;
	struct timerfd_ctx *ctx;
	struct itimerspec kotmr;

	file = timerfd_fget(ufd);
	if (IS_ERR(file))
		return PTR_ERR(file);
	ctx = file->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	if (ctx->expired && ctx->tintv.tv64) {
		ctx->expired = 0;
		/* Account the overrun expirations, then restart the timer. */
		ctx->ticks +=
			hrtimer_forward_now(&ctx->tmr, ctx->tintv) - 1;
		hrtimer_restart(&ctx->tmr);
	}
	kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
	kotmr.it_interval = ktime_to_timespec(ctx->tintv);
	spin_unlock_irq(&ctx->wqh.lock);
	fput(file);

	return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT: 0;
}
379