/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;
EXPORT_SYMBOL(kthreadd_task);

struct kthread_create_info {
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion done;
#ifdef CONFIG_STACK_SIZE
        int stack_flags;
#endif

        struct list_head list;
};

struct kthread {
        int should_stop;
        void *data;
        struct completion exited;
};

#define to_kthread(tsk) \
        container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
        return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);
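
/*
 * Usage sketch: a minimal threadfn that polls kthread_should_stop()
 * and hands its status back to kthread_stop(); do_work() is a
 * hypothetical helper standing in for real per-iteration work.
 *
 *      static int my_threadfn(void *data)
 *      {
 *              while (!kthread_should_stop()) {
 *                      do_work(data);
 *                      schedule_timeout_interruptible(HZ);
 *              }
 *              return 0;
 *      }
 */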

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
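
/*
 * Usage sketch: a freezable main loop built on
 * kthread_freezable_should_stop() instead of calling try_to_freeze()
 * directly; process_item() is a hypothetical helper.  Passing NULL
 * skips the was_frozen report.
 *
 *      static int my_freezable_threadfn(void *data)
 *      {
 *              set_freezable();
 *              while (!kthread_freezable_should_stop(NULL)) {
 *                      process_item(data);
 *                      schedule_timeout_interruptible(HZ);
 *              }
 *              return 0;
 *      }
 */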

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}
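
/*
 * Usage sketch: code that manages a kthread can recover the @data
 * pointer it passed at creation time; my_ctx and tsk are hypothetical
 * and tsk must be known to be a live kthread.
 *
 *      struct my_ctx *ctx = kthread_data(tsk);
 */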

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct kthread self;
        int ret;

        self.should_stop = 0;
        self.data = data;
        init_completion(&self.exited);
        current->vfork_done = &self.exited;

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(&create->done);
        schedule();

        ret = -EINTR;
        if (!self.should_stop)
                ret = threadfn(data);

        /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif

#ifdef CONFIG_STACK_SIZE
        if (create->stack_flags)
                pid = kernel_thread(kthread, create,
                                    CLONE_FS | CLONE_FILES | SIGCHLD | CLONE_8K_STACK);
        else
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                create->result = ERR_PTR(pid);
                complete(&create->done);
        }
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data,
                                           int node,
                                           const char namefmt[],
                                           ...)
{
        struct kthread_create_info create;

#ifdef CONFIG_STACK_SIZE
        create.stack_flags = 0;
        if (((unsigned long)data) & 1) {
                create.stack_flags = 1;
                data = (void *)((unsigned long)data - 1);
        }
#endif
        create.threadfn = threadfn;
        create.data = data;
        create.node = node;
        init_completion(&create.done);

        spin_lock(&kthread_create_lock);
        list_add_tail(&create.list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        wait_for_completion(&create.done);

        if (!IS_ERR(create.result)) {
                static const struct sched_param param = { .sched_priority = 0 };
                va_list args;

                va_start(args, namefmt);
                vsnprintf(create.result->comm, sizeof(create.result->comm),
                          namefmt, args);
                va_end(args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(create.result, cpu_all_mask);
        }
        return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
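
/*
 * Usage sketch: creating, naming and starting a kthread with no NUMA
 * preference; my_threadfn and my_data are hypothetical.
 *
 *      struct task_struct *tsk;
 *
 *      tsk = kthread_create_on_node(my_threadfn, my_data, -1, "mykt/%d", 0);
 *      if (IS_ERR(tsk))
 *              return PTR_ERR(tsk);
 *      wake_up_process(tsk);
 */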

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        /* Must have done schedule() in kthread() before we set_task_cpu */
        if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
                WARN_ON(1);
                return;
        }

        /* It's safe because the task is inactive. */
        do_set_cpus_allowed(p, cpumask_of(cpu));
        p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
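
/*
 * Usage sketch: the create/bind/wake sequence used for per-cpu
 * threads; my_threadfn, my_data and cpu are hypothetical.
 *
 *      tsk = kthread_create_on_node(my_threadfn, my_data,
 *                                   cpu_to_node(cpu), "mykt/%u", cpu);
 *      if (!IS_ERR(tsk)) {
 *              kthread_bind(tsk, cpu);
 *              wake_up_process(tsk);
 *      }
 */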

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);
        get_task_struct(k);

        kthread = to_kthread(k);
        barrier(); /* it might have exited */
        if (k->vfork_done != NULL) {
                kthread->should_stop = 1;
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
        }
        ret = k->exit_code;

        put_task_struct(k);
        trace_sched_kthread_stop_ret(ret);

        return ret;
}
EXPORT_SYMBOL(kthread_stop);
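
/*
 * Usage sketch: stopping the thread started above; -EINTR means the
 * thread was never woken, otherwise the value my_threadfn() returned
 * comes back.
 *
 *      int ret = kthread_stop(tsk);
 *      if (ret && ret != -EINTR)
 *              pr_warn("my_threadfn failed: %d\n", ret);
 */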

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_HIGH_MEMORY]);

        current->flags |= PF_NOFREEZE;

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        WARN_ON(worker->task);
        worker->task = current;
repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
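
/*
 * Usage sketch: driving a kthread_worker with a dedicated thread,
 * assuming the DEFINE_KTHREAD_WORKER()/init_kthread_work() helpers
 * from <linux/kthread.h>; my_work_fn is a hypothetical callback.
 *
 *      DEFINE_KTHREAD_WORKER(my_worker);
 *      struct kthread_work my_work;
 *      struct task_struct *worker_task;
 *
 *      init_kthread_work(&my_work, my_work_fn);
 *      worker_task = kthread_run(kthread_worker_fn, &my_worker, "mywrk");
 *      queue_kthread_work(&my_worker, &my_work);
 */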

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        lockdep_assert_held(&worker->lock);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (likely(worker->task))
                wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been initialized, e.g. with init_kthread_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (list_empty(&work->node)) {
                insert_kthread_work(worker, work, &worker->work_list);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
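
/*
 * Usage sketch: queueing is a no-op while @work is still pending, so
 * callers that must not lose an event can check the return value;
 * my_worker/my_work as in the sketch above.
 *
 *      if (!queue_kthread_work(&my_worker, &my_work))
 *              pr_debug("my_work already pending\n");
 */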

struct kthread_flush_work {
        struct kthread_work work;
        struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

retry:
        worker = work->worker;
        if (!worker)
                return;

        spin_lock_irq(&worker->lock);
        if (work->worker != worker) {
                spin_unlock_irq(&worker->lock);
                goto retry;
        }

        if (!list_empty(&work->node))
                insert_kthread_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                insert_kthread_work(worker, &fwork.work, worker->work_list.next);
        else
                noop = true;

        spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        queue_kthread_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
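
/*
 * Usage sketch: orderly teardown of a worker driven by
 * kthread_worker_fn() -- drain queued works first, then stop the
 * thread; my_worker/worker_task as in the sketches above.
 *
 *      flush_kthread_worker(&my_worker);
 *      kthread_stop(worker_task);
 */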