/*
 * Copyright (c) 2008-2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 * @brief Kernel timer subsystem
 * @defgroup timer Timers
 *
 * The timer subsystem allows functions to be scheduled for later
 * execution. Each timer object is used to cause one function to
 * be executed at a later time.
 *
 * Timer callback functions are called in interrupt context.
 *
 * @{
 */
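
/*
 * Illustrative sketch of the callback contract described above (not compiled
 * here; the names my_cb, t, and arg are hypothetical). Callbacks run in
 * interrupt context, so they should do only brief work; the return value is
 * propagated out of timer_tick() below, so returning INT_RESCHEDULE requests
 * a reschedule on the way out of the interrupt, INT_NO_RESCHEDULE does not.
 *
 *   static enum handler_return my_cb(timer_t *t, lk_time_t now, void *arg)
 *   {
 *       // keep this short: we are running in interrupt context
 *       return INT_NO_RESCHEDULE;
 *   }
 */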
#include <debug.h>
#include <trace.h>
#include <assert.h>
#include <list.h>
#include <kernel/thread.h>
#include <kernel/timer.h>
#include <kernel/debug.h>
#include <kernel/spinlock.h>
#include <platform/timer.h>
#include <platform.h>

#define LOCAL_TRACE 0

spin_lock_t timer_lock;

struct timer_state {
    struct list_node timer_queue;
} __CPU_ALIGN;

static struct timer_state timers[SMP_MAX_CPUS];

static enum handler_return timer_tick(void *arg, lk_time_t now);

/**
 * @brief Initialize a timer object
 */
void timer_initialize(timer_t *timer)
{
    *timer = (timer_t)TIMER_INITIAL_VALUE(*timer);
}

static void insert_timer_in_queue(uint cpu, timer_t *timer)
{
    timer_t *entry;

    DEBUG_ASSERT(arch_ints_disabled());

    LTRACEF("timer %p, cpu %u, scheduled %u, periodic %u\n", timer, cpu, timer->scheduled_time, timer->periodic_time);

    list_for_every_entry(&timers[cpu].timer_queue, entry, timer_t, node) {
        if (TIME_GT(entry->scheduled_time, timer->scheduled_time)) {
            list_add_before(&entry->node, &timer->node);
            return;
        }
    }

    /* walked off the end of the list */
    list_add_tail(&timers[cpu].timer_queue, &timer->node);
}

static void timer_set(timer_t *timer, lk_time_t delay, lk_time_t period, timer_callback callback, void *arg)
{
    lk_time_t now;

    LTRACEF("timer %p, delay %u, period %u, callback %p, arg %p\n", timer, delay, period, callback, arg);

    DEBUG_ASSERT(timer->magic == TIMER_MAGIC);

    if (list_in_list(&timer->node)) {
        panic("timer %p already in list\n", timer);
    }

    now = current_time();
    timer->scheduled_time = now + delay;
    timer->periodic_time = period;
    timer->callback = callback;
    timer->arg = arg;

    LTRACEF("scheduled time %u\n", timer->scheduled_time);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&timer_lock, state);

    uint cpu = arch_curr_cpu_num();
    insert_timer_in_queue(cpu, timer);

#if PLATFORM_HAS_DYNAMIC_TIMER
    if (list_peek_head_type(&timers[cpu].timer_queue, timer_t, node) == timer) {
        /* we just modified the head of the timer queue */
        LTRACEF("setting new timer for %u msecs\n", delay);
        platform_set_oneshot_timer(timer_tick, NULL, delay);
    }
#endif

    spin_unlock_irqrestore(&timer_lock, state);
}

/**
 * @brief Set up a timer that executes once
 *
 * This function specifies a callback function to be called after a specified
 * delay. The callback will be called one time.
 *
 * @param timer The timer to use
 * @param delay The delay, in ms, before the callback is invoked
 * @param callback The function to call when the timer expires
 * @param arg The argument to pass to the callback
 *
 * The timer callback is declared as:
 *   enum handler_return callback(struct timer *, lk_time_t now, void *arg) { ... }
 */
void timer_set_oneshot(timer_t *timer, lk_time_t delay, timer_callback callback, void *arg)
{
    if (delay == 0)
        delay = 1;
    timer_set(timer, delay, 0, callback, arg);
}
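
/*
 * A minimal one-shot usage sketch (illustrative only; my_timer and my_cb are
 * hypothetical). The timer must be initialized before its first use, since
 * timer_set() asserts on TIMER_MAGIC, and a zero delay is bumped to 1 ms by
 * timer_set_oneshot() above.
 *
 *   static timer_t my_timer;
 *
 *   timer_initialize(&my_timer);
 *   timer_set_oneshot(&my_timer, 100, my_cb, NULL);  // fire once in ~100 ms
 */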

/**
 * @brief Set up a timer that executes repeatedly
 *
 * This function specifies a callback function to be called at a specified
 * period. The callback will be called repeatedly until the timer is
 * cancelled.
 *
 * @param timer The timer to use
 * @param period The period, in ms, at which the callback is invoked
 * @param callback The function to call when the timer expires
 * @param arg The argument to pass to the callback
 *
 * The timer callback is declared as:
 *   enum handler_return callback(struct timer *, lk_time_t now, void *arg) { ... }
 */
void timer_set_periodic(timer_t *timer, lk_time_t period, timer_callback callback, void *arg)
{
    if (period == 0)
        period = 1;
    timer_set(timer, period, period, callback, arg);
}
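
/*
 * A minimal periodic usage sketch (illustrative only; my_timer and my_cb are
 * hypothetical). The callback fires every period until the timer is
 * cancelled; timer_cancel() below also clears periodic_time, so cancelling
 * from within the callback itself keeps the timer from being requeued.
 *
 *   timer_initialize(&my_timer);
 *   timer_set_periodic(&my_timer, 10, my_cb, NULL);  // fire every ~10 ms
 *   ...
 *   timer_cancel(&my_timer);                         // stop it again
 */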

/**
 * @brief Cancel a pending timer
 */
void timer_cancel(timer_t *timer)
{
    DEBUG_ASSERT(timer->magic == TIMER_MAGIC);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&timer_lock, state);

#if PLATFORM_HAS_DYNAMIC_TIMER
    uint cpu = arch_curr_cpu_num();

    timer_t *oldhead = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
#endif

    if (list_in_list(&timer->node))
        list_delete(&timer->node);

    /* to keep it from being reinserted into the queue if called from
     * periodic timer callback.
     */
    timer->periodic_time = 0;
    timer->callback = NULL;
    timer->arg = NULL;

#if PLATFORM_HAS_DYNAMIC_TIMER
    /* see if we've just modified the head of the timer queue */
    timer_t *newhead = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
    if (newhead == NULL) {
        LTRACEF("clearing old hw timer, nothing in the queue\n");
        platform_stop_timer();
    } else if (newhead != oldhead) {
        lk_time_t delay;
        lk_time_t now = current_time();

        if (TIME_LT(newhead->scheduled_time, now))
            delay = 0;
        else
            delay = newhead->scheduled_time - now;

        LTRACEF("setting new timer to %u\n", (uint) delay);
        platform_set_oneshot_timer(timer_tick, NULL, delay);
    }
#endif

    spin_unlock_irqrestore(&timer_lock, state);
}

/* called at interrupt time to process any pending timers */
static enum handler_return timer_tick(void *arg, lk_time_t now)
{
    timer_t *timer;
    enum handler_return ret = INT_NO_RESCHEDULE;

    DEBUG_ASSERT(arch_ints_disabled());

    THREAD_STATS_INC(timer_ints);
    // KEVLOG_TIMER_TICK(); // enable only if necessary

    uint cpu = arch_curr_cpu_num();

    LTRACEF("cpu %u now %u, sp %p\n", cpu, now, __GET_FRAME());

    spin_lock(&timer_lock);

    for (;;) {
        /* see if there's an event to process */
        timer = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
        if (likely(timer == 0))
            break;
        LTRACEF("next item on timer queue %p at %u now %u (%p, arg %p)\n", timer, timer->scheduled_time, now, timer->callback, timer->arg);
        if (likely(TIME_LT(now, timer->scheduled_time)))
            break;

        /* process it */
        LTRACEF("timer %p\n", timer);
        DEBUG_ASSERT(timer && timer->magic == TIMER_MAGIC);
        list_delete(&timer->node);

        /* we pulled it off the list, release the list lock to handle it */
        spin_unlock(&timer_lock);

        LTRACEF("dequeued timer %p, scheduled %u periodic %u\n", timer, timer->scheduled_time, timer->periodic_time);

        THREAD_STATS_INC(timers);

        bool periodic = timer->periodic_time > 0;

        LTRACEF("timer %p firing callback %p, arg %p\n", timer, timer->callback, timer->arg);
        KEVLOG_TIMER_CALL(timer->callback, timer->arg);
        if (timer->callback(timer, now, timer->arg) == INT_RESCHEDULE)
            ret = INT_RESCHEDULE;

        /* it may have been requeued or periodic, grab the lock so we can safely inspect it */
        spin_lock(&timer_lock);

        /* if it was a periodic timer and it hasn't been requeued
         * by the callback put it back in the list
         */
        if (periodic && !list_in_list(&timer->node) && timer->periodic_time > 0) {
            LTRACEF("periodic timer, period %u\n", timer->periodic_time);
            timer->scheduled_time = now + timer->periodic_time;
            insert_timer_in_queue(cpu, timer);
        }
    }

#if PLATFORM_HAS_DYNAMIC_TIMER
    /* reset the timer to the next event */
    timer = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
    if (timer) {
        /* has to be the case or it would have fired already */
        DEBUG_ASSERT(TIME_GT(timer->scheduled_time, now));

        lk_time_t delay = timer->scheduled_time - now;

        LTRACEF("setting new timer for %u msecs for event %p\n", (uint)delay, timer);
        platform_set_oneshot_timer(timer_tick, NULL, delay);
    }

    /* we're done manipulating the timer queue */
    spin_unlock(&timer_lock);
#else
    /* release the timer lock before calling the tick handler */
    spin_unlock(&timer_lock);

    /* let the scheduler have a shot to do quantum expiration, etc */
    /* in case of dynamic timer, the scheduler will set up a periodic timer */
    if (thread_timer_tick(NULL, now, NULL) == INT_RESCHEDULE)
        ret = INT_RESCHEDULE;
#endif

    return ret;
}

void timer_init(void)
{
    timer_lock = SPIN_LOCK_INITIAL_VALUE;
    for (uint i = 0; i < SMP_MAX_CPUS; i++) {
        list_initialize(&timers[i].timer_queue);
    }
#if !PLATFORM_HAS_DYNAMIC_TIMER
    /* register for a periodic timer tick */
    platform_set_periodic_timer(timer_tick, NULL, 10); /* 10ms */
#endif
}

/* vim: set noexpandtab */