/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
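/*
 * tick_do_timer_cpu holds the CPU that is responsible for calling
 * do_timer(), i.e. the jiffies and timekeeping update. The duty is
 * handed over when that CPU goes offline, see tick_handover_do_timer().
 */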
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
static DEFINE_RAW_SPINLOCK(tick_device_lock);

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
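	/*
	 * A device which stops in deep C-states (CLOCK_EVT_FEAT_C3STOP)
	 * is only usable in oneshot mode if the broadcast machinery can
	 * take over while it is stopped.
	 */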
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}

/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		raw_spin_lock(&xtime_lock);
		write_seqcount_begin(&xtime_seq);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_seqcount_end(&xtime_seq);
		raw_spin_unlock(&xtime_lock);
	}

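	/*
	 * Per-CPU tick work: account the tick to the current task, run
	 * expired timers and the scheduler tick; profile_tick() drives
	 * kernel profiling.
	 */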
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite loop,
		 * as tick_periodic() increments jiffies, which then
		 * will increment time, possibly causing the loop to
		 * trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}

/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqcount_begin(&xtime_seq);
			next = tick_next_period;
		} while (read_seqcount_retry(&xtime_seq, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

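		/*
		 * Program the first expiry. If the event is already in
		 * the past, clockevents_program_event() fails and we
		 * retry one tick period further out.
		 */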
		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
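		/*
		 * We are replacing an already installed device. Save its
		 * handler and next expiry so the new device can take over
		 * seamlessly, and route the old device to a noop handler
		 * in case a stray interrupt still arrives.
		 */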
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

/*
 * Check if the newly registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {

		/*
		 * If the cpu affinity of the device interrupt cannot
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we already have a cpu local device, do not replace
		 * it with a non cpu local device.
		 */
		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer oneshot capable devices !
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the existing device, if any, with the new device. If
	 * the current device is the broadcast device, do not give it
	 * back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
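
	/*
	 * If the new device is oneshot capable, let the tick oneshot
	 * code know so it can switch to highres/nohz mode later on.
	 */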
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}

/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled.
 */
static void tick_handover_do_timer(int *cpup)
{
	if (*cpup == tick_do_timer_cpu) {
		int cpu = cpumask_first(cpu_online_mask);

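		/*
		 * Hand the duty to the first online CPU; if none is
		 * left, mark it as unowned.
		 */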
		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

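/*
 * Called on the CLOCK_EVT_NOTIFY_SUSPEND transition: shut down this
 * CPU's tick device while the system suspends.
 */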
static void tick_suspend(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_shutdown(td->evtdev);
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

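/*
 * Called on the CLOCK_EVT_NOTIFY_RESUME transition: resume this CPU's
 * tick device and, unless the broadcast code took over, restore
 * periodic or oneshot operation.
 */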
static void tick_resume(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;
	int broadcast = tick_resume_broadcast();

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}

/*
 * Notification about clock event devices
 */
static int tick_notify(struct notifier_block *nb, unsigned long reason,
		       void *dev)
{
	switch (reason) {

	case CLOCK_EVT_NOTIFY_ADD:
		return tick_check_new_device(dev);

	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, dev);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(dev);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(dev);
		tick_shutdown_broadcast(dev);
		tick_shutdown(dev);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};

/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework
 */
void __init tick_init(void)
{
	clockevents_register_notifier(&tick_notifier);
}