/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>
#include <linux/delay.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/* Exit an icq. Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/* Release an icq. Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock. If it's not pointing to @icq now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger a lockdep warning. The ioc's are guaranteed
	 * to be different, use a different locking subclass here. Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_chill();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference to io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock. Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
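
/*
 * Illustrative sketch, not part of the original file: how a caller typically
 * pairs get_io_context() with put_io_context(). The helper name is
 * hypothetical and exists only to show the reference discipline.
 */
static void __maybe_unused example_ioc_ref_usage(struct io_context *ioc)
{
	get_io_context(ioc);	/* hold a reference while @ioc is in use */
	/* ... inspect or update @ioc, e.g. read ioc->ioprio ... */
	put_io_context(ioc);	/* drop it; final release may run from a workqueue */
}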

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;
	struct io_cq *icq;
	struct hlist_node *n;
	unsigned long flags;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq. Perform
	 * reverse double locking. Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_chill();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's. Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}
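
/*
 * Illustrative sketch, not part of the original file: how a caller such as an
 * elevator switch path would drop every icq on a queue. The helper name is
 * hypothetical; ioc_clear_queue() itself requires queue_lock to be held.
 */
static void __maybe_unused example_clear_queue_icqs(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);	/* walks q->icq_list and destroys each icq */
	spin_unlock_irq(q->queue_lock);
}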

void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install. ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting. Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files(). The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	task_unlock(task);
}
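
/*
 * Illustrative sketch, not part of the original file: callers below go through
 * the create_io_context() fast-path wrapper declared in blk.h, which is
 * roughly shaped like the hypothetical helper here (named differently so it
 * cannot clash with the real one). It shows the check-then-slowpath pattern
 * and the convention of returning whichever io_context ended up installed,
 * possibly NULL on allocation failure.
 */
static struct io_context * __maybe_unused
example_create_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	if (unlikely(!task->io_context))
		create_io_context_slowpath(task, gfp_flags, node);
	return task->io_context;
}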

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (create_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
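
/*
 * Illustrative sketch, not part of the original file: grabbing a reference to
 * another task's io_context, roughly the pattern the ioprio syscalls follow.
 * The helper name is hypothetical; it assumes ioc_ioprio_changed() below is
 * also declared to callers via linux/iocontext.h.
 */
static int __maybe_unused example_change_task_ioprio(struct task_struct *task,
						      int ioprio)
{
	struct io_context *ioc;

	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (!ioc)
		return -ENOMEM;

	ioc_ioprio_changed(ioc, ioprio);	/* flag every icq, see below */
	put_io_context(ioc);
	return 0;
}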

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU. All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
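
/*
 * Illustrative sketch, not part of the original file: the lookup-then-create
 * pattern a request submission path would typically follow. The helper name
 * is hypothetical and assumes ioc_create_icq() below is declared in blk.h;
 * ioc_create_icq() takes the locks itself, while ioc_lookup_icq() relies on
 * the caller holding queue_lock.
 */
static struct io_cq * __maybe_unused
example_find_or_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct io_cq *icq = NULL;
	struct io_context *ioc = current->io_context;

	if (ioc) {
		spin_lock_irq(q->queue_lock);
		icq = ioc_lookup_icq(ioc, q);
		spin_unlock_irq(q->queue_lock);
	}
	if (!icq)
		icq = ioc_create_icq(q, gfp_mask);
	return icq;
}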

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking %current->io_context and @q exists. If either
 * io_context and/or icq don't exist, they will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc;
	struct io_cq *icq;

	/* allocate stuff */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
{
	struct io_cq *icq;
	struct hlist_node *n;

	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
		icq->flags |= flags;
}

/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio. Set %ICQ_IOPRIO_CHANGED for all
 * icq's. iosched is responsible for checking the bit and applying it on
 * request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed. Set %ICQ_CGROUP_CHANGED for all icq's.
 * iosched is responsible for checking the bit and applying it on request
 * issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);

/**
 * icq_get_changed - fetch and clear icq changed mask
 * @icq: icq of interest
 *
 * Fetch and clear ICQ_*_CHANGED bits from @icq. Grabs and releases
 * @icq->ioc->lock.
 */
unsigned icq_get_changed(struct io_cq *icq)
{
	unsigned int changed = 0;
	unsigned long flags;

	if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
		spin_lock_irqsave(&icq->ioc->lock, flags);
		changed = icq->flags & ICQ_CHANGED_MASK;
		icq->flags &= ~ICQ_CHANGED_MASK;
		spin_unlock_irqrestore(&icq->ioc->lock, flags);
	}
	return changed;
}
EXPORT_SYMBOL(icq_get_changed);
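
/*
 * Illustrative sketch, not part of the original file: how an I/O scheduler
 * would typically consume the changed bits on its request issue path. The
 * helper name is hypothetical and the reactions are only outlined in
 * comments.
 */
static void __maybe_unused example_apply_icq_changes(struct io_cq *icq)
{
	unsigned int changed = icq_get_changed(icq);

	if (changed & ICQ_IOPRIO_CHANGED) {
		/* re-read icq->ioc->ioprio and reclassify the internal queue */
	}
	if (changed & ICQ_CGROUP_CHANGED) {
		/* drop any cached cgroup association and look it up again */
	}
}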

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);