/*
 * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
 *
 * Provides a framework for enqueuing callbacks from irq context in a
 * PREEMPT_RT_FULL-safe way. The callbacks are executed in kthread context.
 */

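/*
 * Minimal usage sketch (illustrative only, not part of this file's code):
 * a user takes a reference with swork_get(), initializes a struct
 * swork_event and queues it with swork_queue(), and drops the reference
 * with swork_put() once all queued events have completed. The names
 * my_event and my_callback are made up; INIT_SWORK() is assumed to be the
 * event initializer provided by <linux/work-simple.h> in the -rt tree.
 *
 *      static struct swork_event my_event;
 *
 *      static void my_callback(struct swork_event *sev)
 *      {
 *              pr_info("running in kthread context (kswork)\n");
 *      }
 *
 *      ret = swork_get();                      // typically at init time
 *      INIT_SWORK(&my_event, my_callback);
 *      swork_queue(&my_event);                 // safe from irq context
 *      swork_put();                            // after all events completed
 */
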
#include <linux/wait-simple.h>
#include <linux/work-simple.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define SWORK_EVENT_PENDING	(1 << 0)

/* Single global worker, reference counted via swork_get()/swork_put(). */
static DEFINE_MUTEX(worker_mutex);
static struct sworker *glob_worker;

struct sworker {
	struct list_head events;
	struct swait_head wq;

	raw_spinlock_t lock;

	struct task_struct *task;
	int refs;
};

static bool swork_readable(struct sworker *worker)
{
	bool r;

	if (kthread_should_stop())
		return true;

	raw_spin_lock_irq(&worker->lock);
	r = !list_empty(&worker->events);
	raw_spin_unlock_irq(&worker->lock);

	return r;
}

static int swork_kthread(void *arg)
{
	struct sworker *worker = arg;

	for (;;) {
		swait_event_interruptible(worker->wq,
					  swork_readable(worker));
		if (kthread_should_stop())
			break;

		raw_spin_lock_irq(&worker->lock);
		while (!list_empty(&worker->events)) {
			struct swork_event *sev;

			sev = list_first_entry(&worker->events,
					       struct swork_event, item);
			list_del(&sev->item);
			/* Drop the lock while the callback runs; it may queue new work. */
			raw_spin_unlock_irq(&worker->lock);

			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
							 &sev->flags));
			sev->func(sev);
			raw_spin_lock_irq(&worker->lock);
		}
		raw_spin_unlock_irq(&worker->lock);
	}
	return 0;
}

static struct sworker *swork_create(void)
{
	struct sworker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&worker->events);
	raw_spin_lock_init(&worker->lock);
	init_swait_head(&worker->wq);

	worker->task = kthread_run(swork_kthread, worker, "kswork");
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return ERR_PTR(-ENOMEM);
	}

	return worker;
}

static void swork_destroy(struct sworker *worker)
{
	kthread_stop(worker->task);

	WARN_ON(!list_empty(&worker->events));
	kfree(worker);
}

/**
 * swork_queue - queue swork
 * @sev:	swork_event to be queued
 *
 * Returns %false if @sev was already on a queue, %true otherwise.
 *
 * The work is queued and processed on a random CPU.
 */
bool swork_queue(struct swork_event *sev)
{
	unsigned long flags;

	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
		return false;

	raw_spin_lock_irqsave(&glob_worker->lock, flags);
	list_add_tail(&sev->item, &glob_worker->events);
	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);

	swait_wake(&glob_worker->wq);
	return true;
}
EXPORT_SYMBOL_GPL(swork_queue);

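/*
 * Because swork_queue() only sets a pending bit and takes glob_worker->lock
 * with interrupts disabled, it can be called from hard irq context, which is
 * what the framework is for. A callback usually recovers its enclosing
 * object via container_of(); a hypothetical client (struct my_device,
 * my_irq_handler and my_work_cb are made-up names) might look like:
 *
 *      struct my_device {
 *              struct swork_event sev;
 *              int pending_status;
 *      };
 *
 *      static irqreturn_t my_irq_handler(int irq, void *data)
 *      {
 *              struct my_device *dev = data;
 *
 *              dev->pending_status = 1;
 *              swork_queue(&dev->sev);
 *              return IRQ_HANDLED;
 *      }
 *
 *      static void my_work_cb(struct swork_event *sev)
 *      {
 *              struct my_device *dev = container_of(sev, struct my_device, sev);
 *              // may sleep here: we run in kthread context
 *      }
 */
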
/**
 * swork_get - get an instance of the sworker
 *
 * Returns a negative error code if the initialization of the worker
 * failed, %0 otherwise.
 */
int swork_get(void)
{
	struct sworker *worker;

	mutex_lock(&worker_mutex);
	if (!glob_worker) {
		worker = swork_create();
		if (IS_ERR(worker)) {
			mutex_unlock(&worker_mutex);
			return -ENOMEM;
		}

		glob_worker = worker;
	}

	glob_worker->refs++;
	mutex_unlock(&worker_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(swork_get);

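/*
 * Note on reference counting (restating the behaviour above): the first
 * swork_get() creates the global "kswork" thread, later calls only bump
 * glob_worker->refs, and each successful swork_get() is expected to be
 * balanced by a swork_put(). A paired-usage sketch with made-up module
 * hooks:
 *
 *      static int __init my_module_init(void)
 *      {
 *              return swork_get();
 *      }
 *
 *      static void __exit my_module_exit(void)
 *      {
 *              swork_put();
 *      }
 */
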
/**
 * swork_put - puts an instance of the sworker
 *
 * Destroys the sworker thread once the last reference has been dropped.
 * This function must not be called until all queued events have been
 * completed.
 */
void swork_put(void)
{
	mutex_lock(&worker_mutex);

	glob_worker->refs--;
	if (glob_worker->refs > 0)
		goto out;

	swork_destroy(glob_worker);
	glob_worker = NULL;
out:
	mutex_unlock(&worker_mutex);
}
EXPORT_SYMBOL_GPL(swork_put);
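
/*
 * swork_put() must not race with events that are still queued or running.
 * One way a caller can satisfy that requirement is to wait for its own
 * callback to signal a completion before dropping the reference; a sketch
 * using made-up names (my_event, my_last_cb, my_done) and the standard
 * <linux/completion.h> API:
 *
 *      static DECLARE_COMPLETION(my_done);
 *
 *      static void my_last_cb(struct swork_event *sev)
 *      {
 *              complete(&my_done);
 *      }
 *
 *      swork_queue(&my_event);
 *      wait_for_completion(&my_done);  // ensure the callback has run
 *      swork_put();
 */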